"""simple docstring"""
from math import factorial
def a__ ( SCREAMING_SNAKE_CASE : int = 1_0_0 ):
'''simple docstring'''
return sum(int(SCREAMING_SNAKE_CASE ) for x in str(factorial(SCREAMING_SNAKE_CASE ) ) )
if __name__ == "__main__":
print(solution(int(input('''Enter the Number: ''').strip())))
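# Worked example (added for illustration): factorial(10) == 3628800, and
# 3 + 6 + 2 + 8 + 8 + 0 + 0 == 27, so solution(10) returns 27.
assert solution(10) == 27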
"""simple docstring"""
from typing import Any
class SCREAMING_SNAKE_CASE__ :
"""simple docstring"""
def __init__( self , snake_case__ ):
"""simple docstring"""
lowerCAmelCase : Dict = data
lowerCAmelCase : Any = None
def __repr__( self ):
"""simple docstring"""
return f"""Node({self.data})"""
class SCREAMING_SNAKE_CASE__ :
"""simple docstring"""
def __init__( self ):
"""simple docstring"""
lowerCAmelCase : Tuple = None
def __iter__( self ):
"""simple docstring"""
lowerCAmelCase : Any = self.head
while node:
yield node.data
lowerCAmelCase : Optional[int] = node.next
def __len__( self ):
"""simple docstring"""
return sum(1 for _ in self )
def __repr__( self ):
"""simple docstring"""
return "->".join([str(snake_case__ ) for item in self] )
def __getitem__( self , snake_case__ ):
"""simple docstring"""
if not 0 <= index < len(self ):
raise ValueError("list index out of range." )
for i, node in enumerate(self ):
if i == index:
return node
return None
def __setitem__( self , snake_case__ , snake_case__ ):
"""simple docstring"""
if not 0 <= index < len(self ):
raise ValueError("list index out of range." )
lowerCAmelCase : Union[str, Any] = self.head
for _ in range(snake_case__ ):
lowerCAmelCase : int = current.next
lowerCAmelCase : List[str] = data
def lowercase__ ( self , snake_case__ ):
"""simple docstring"""
self.insert_nth(len(self ) , snake_case__ )
def lowercase__ ( self , snake_case__ ):
"""simple docstring"""
self.insert_nth(0 , snake_case__ )
def lowercase__ ( self , snake_case__ , snake_case__ ):
"""simple docstring"""
if not 0 <= index <= len(self ):
raise IndexError("list index out of range" )
lowerCAmelCase : Optional[int] = Node(snake_case__ )
if self.head is None:
lowerCAmelCase : Any = new_node
elif index == 0:
lowerCAmelCase : Any = self.head # link new_node to head
lowerCAmelCase : Union[str, Any] = new_node
else:
lowerCAmelCase : List[str] = self.head
for _ in range(index - 1 ):
lowerCAmelCase : int = temp.next
lowerCAmelCase : int = temp.next
lowerCAmelCase : Dict = new_node
def lowercase__ ( self ): # print every node data
"""simple docstring"""
print(self )
def lowercase__ ( self ):
"""simple docstring"""
return self.delete_nth(0 )
def lowercase__ ( self ): # delete from tail
"""simple docstring"""
return self.delete_nth(len(self ) - 1 )
def lowercase__ ( self , snake_case__ = 0 ):
"""simple docstring"""
if not 0 <= index <= len(self ) - 1: # test if index is valid
raise IndexError("List index out of range." )
lowerCAmelCase : List[Any] = self.head # default first node
if index == 0:
lowerCAmelCase : Optional[int] = self.head.next
else:
lowerCAmelCase : List[str] = self.head
for _ in range(index - 1 ):
lowerCAmelCase : Union[str, Any] = temp.next
lowerCAmelCase : Optional[Any] = temp.next
lowerCAmelCase : Any = temp.next.next
return delete_node.data
def lowercase__ ( self ):
"""simple docstring"""
return self.head is None
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : str = None
lowerCAmelCase : Optional[int] = self.head
while current:
# Store the current node's next node.
lowerCAmelCase : List[Any] = current.next
# Make the current node's next point backwards
lowerCAmelCase : Dict = prev
# Make the previous node be the current node
lowerCAmelCase : List[str] = current
# Make the current node the next node (to progress iteration)
lowerCAmelCase : int = next_node
# Return prev in order to put the head at the end
lowerCAmelCase : Tuple = prev
def a__ ( ):
'''simple docstring'''
lowerCAmelCase : Tuple = LinkedList()
assert linked_list.is_empty() is True
assert str(SCREAMING_SNAKE_CASE ) == ""
try:
linked_list.delete_head()
raise AssertionError # This should not happen.
except IndexError:
assert True # This should happen.
try:
linked_list.delete_tail()
raise AssertionError # This should not happen.
except IndexError:
assert True # This should happen.
for i in range(1_0 ):
assert len(SCREAMING_SNAKE_CASE ) == i
linked_list.insert_nth(SCREAMING_SNAKE_CASE , i + 1 )
assert str(SCREAMING_SNAKE_CASE ) == "->".join(str(SCREAMING_SNAKE_CASE ) for i in range(1 , 1_1 ) )
linked_list.insert_head(0 )
linked_list.insert_tail(1_1 )
assert str(SCREAMING_SNAKE_CASE ) == "->".join(str(SCREAMING_SNAKE_CASE ) for i in range(0 , 1_2 ) )
assert linked_list.delete_head() == 0
assert linked_list.delete_nth(9 ) == 1_0
assert linked_list.delete_tail() == 1_1
assert len(SCREAMING_SNAKE_CASE ) == 9
assert str(SCREAMING_SNAKE_CASE ) == "->".join(str(SCREAMING_SNAKE_CASE ) for i in range(1 , 1_0 ) )
assert all(linked_list[i] == i + 1 for i in range(0 , 9 ) ) is True
for i in range(0 , 9 ):
lowerCAmelCase : Optional[Any] = -i
assert all(linked_list[i] == -i for i in range(0 , 9 ) ) is True
linked_list.reverse()
assert str(SCREAMING_SNAKE_CASE ) == "->".join(str(SCREAMING_SNAKE_CASE ) for i in range(-8 , 1 ) )
def a__ ( ):
'''simple docstring'''
lowerCAmelCase : List[str] = [
-9,
1_0_0,
Node(7_7_3_4_5_1_1_2 ),
"dlrow olleH",
7,
5_5_5_5,
0,
-192.55_555,
"Hello, world!",
77.9,
Node(1_0 ),
None,
None,
12.20,
]
lowerCAmelCase : List[str] = LinkedList()
for i in test_input:
linked_list.insert_tail(SCREAMING_SNAKE_CASE )
# Check if it's empty or not
assert linked_list.is_empty() is False
assert (
str(SCREAMING_SNAKE_CASE ) == "-9->100->Node(77345112)->dlrow olleH->7->5555->0->"
"-192.55555->Hello, world!->77.9->Node(10)->None->None->12.2"
)
# Delete the head
lowerCAmelCase : str = linked_list.delete_head()
assert result == -9
assert (
str(SCREAMING_SNAKE_CASE ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
"Hello, world!->77.9->Node(10)->None->None->12.2"
)
# Delete the tail
lowerCAmelCase : Union[str, Any] = linked_list.delete_tail()
assert result == 12.2
assert (
str(SCREAMING_SNAKE_CASE ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
"Hello, world!->77.9->Node(10)->None->None"
)
# Delete a node in specific location in linked list
lowerCAmelCase : List[str] = linked_list.delete_nth(1_0 )
assert result is None
assert (
str(SCREAMING_SNAKE_CASE ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
"Hello, world!->77.9->Node(10)->None"
)
# Add a Node instance to its head
linked_list.insert_head(Node("Hello again, world!" ) )
assert (
str(SCREAMING_SNAKE_CASE )
== "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
"7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None"
)
# Add None to its tail
linked_list.insert_tail(SCREAMING_SNAKE_CASE )
assert (
str(SCREAMING_SNAKE_CASE )
== "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
"7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None->None"
)
# Reverse the linked list
linked_list.reverse()
assert (
str(SCREAMING_SNAKE_CASE )
== "None->None->Node(10)->77.9->Hello, world!->-192.55555->0->5555->"
"7->dlrow olleH->Node(77345112)->100->Node(Hello again, world!)"
)
def a__ ( ):
'''simple docstring'''
from doctest import testmod
testmod()
lowerCAmelCase : Optional[Any] = LinkedList()
linked_list.insert_head(input("Inserting 1st at head " ).strip() )
linked_list.insert_head(input("Inserting 2nd at head " ).strip() )
print("\nPrint list:" )
linked_list.print_list()
linked_list.insert_tail(input("\nInserting 1st at tail " ).strip() )
linked_list.insert_tail(input("Inserting 2nd at tail " ).strip() )
print("\nPrint list:" )
linked_list.print_list()
print("\nDelete head" )
linked_list.delete_head()
print("Delete tail" )
linked_list.delete_tail()
print("\nPrint list:" )
linked_list.print_list()
print("\nReverse linked list" )
linked_list.reverse()
print("\nPrint list:" )
linked_list.print_list()
print("\nString representation of linked list:" )
print(SCREAMING_SNAKE_CASE )
print("\nReading/changing Node data using indexing:" )
print(f"""Element at Position 1: {linked_list[1]}""" )
lowerCAmelCase : Any = input("Enter New Value: " ).strip()
print("New list:" )
print(SCREAMING_SNAKE_CASE )
print(f"""length of linked_list is : {len(SCREAMING_SNAKE_CASE )}""" )
if __name__ == "__main__":
main()
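# A minimal usage sketch (added for illustration, mirroring the doctests above):
#
#     >>> demo = LinkedList()
#     >>> for value in (1, 2, 3):
#     ...     demo.insert_tail(value)
#     >>> demo.reverse()
#     >>> str(demo)
#     '3->2->1'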
"""simple docstring"""
def a__ ( SCREAMING_SNAKE_CASE : list[int] ):
'''simple docstring'''
if not numbers:
return 0
if not isinstance(SCREAMING_SNAKE_CASE , (list, tuple) ) or not all(
isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) for number in numbers ):
raise ValueError("numbers must be an iterable of integers" )
lowerCAmelCase : Tuple = numbers[0]
for i in range(1 , len(SCREAMING_SNAKE_CASE ) ):
# update the maximum and minimum subarray products
lowerCAmelCase : Any = numbers[i]
if number < 0:
lowerCAmelCase , lowerCAmelCase : str = min_till_now, max_till_now
lowerCAmelCase : Tuple = max(SCREAMING_SNAKE_CASE , max_till_now * number )
lowerCAmelCase : Optional[Any] = min(SCREAMING_SNAKE_CASE , min_till_now * number )
# update the maximum product found till now
lowerCAmelCase : List[str] = max(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
return max_prod
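# Worked example (added for illustration): the best subarray is the whole array,
# since 2 * 3 * (-2) * 4 * (-1) == 48; the scan above finds it by tracking both the
# running maximum and the running minimum product.
assert max_product_subarray([2, 3, -2, 4, -1]) == 48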
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
lowerCAmelCase__ = {
'''configuration_efficientformer''': [
'''EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''EfficientFormerConfig''',
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = ['''EfficientFormerImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = [
'''EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''EfficientFormerForImageClassification''',
'''EfficientFormerForImageClassificationWithTeacher''',
'''EfficientFormerModel''',
'''EfficientFormerPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = [
'''TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFEfficientFormerForImageClassification''',
'''TFEfficientFormerForImageClassificationWithTeacher''',
'''TFEfficientFormerModel''',
'''TFEfficientFormerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_efficientformer import EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, EfficientFormerConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_efficientformer import EfficientFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_efficientformer import (
EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
EfficientFormerForImageClassification,
EfficientFormerForImageClassificationWithTeacher,
EfficientFormerModel,
EfficientFormerPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_efficientformer import (
TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerModel,
TFEfficientFormerPreTrainedModel,
)
else:
import sys
lowerCAmelCase__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
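# A minimal usage sketch (added for illustration; assumes a `transformers` release
# that ships the EfficientFormer port). Because of the lazy module above, the import
# below only resolves torch-dependent modeling code when a name is actually accessed:
#
#     from transformers import EfficientFormerConfig
#     config = EfficientFormerConfig()
#     print(config.model_type)  # "efficientformer"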
"""simple docstring"""
import os
# Precomputes a list of the 100 first triangular numbers
lowerCAmelCase__ = [int(0.5 * n * (n + 1)) for n in range(1, 101)]
def a__ ( ):
'''simple docstring'''
lowerCAmelCase : Optional[Any] = os.path.dirname(os.path.realpath(SCREAMING_SNAKE_CASE ) )
lowerCAmelCase : Optional[int] = os.path.join(SCREAMING_SNAKE_CASE , "words.txt" )
lowerCAmelCase : List[Any] = ""
with open(SCREAMING_SNAKE_CASE ) as f:
lowerCAmelCase : int = f.readline()
lowerCAmelCase : int = [word.strip("\"" ) for word in words.strip("\r\n" ).split("," )]
lowerCAmelCase : Tuple = [
word
for word in [sum(ord(SCREAMING_SNAKE_CASE ) - 6_4 for x in word ) for word in words]
if word in TRIANGULAR_NUMBERS
]
return len(SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
print(solution())
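# Worked example (added for illustration): "SKY" maps to 19 + 11 + 25 == 55, which is
# the 10th triangular number (10 * 11 / 2), so "SKY" counts as a triangular word.
assert sum(ord(letter) - 64 for letter in "SKY") == 55
assert 55 in TRIANGULAR_NUMBERS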
"""simple docstring"""
import argparse
import csv
import logging
import os
import random
import numpy as np
import torch
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler, TensorDataset
from tqdm import tqdm, trange
from transformers import (
CONFIG_NAME,
WEIGHTS_NAME,
AdamW,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTTokenizer,
get_linear_schedule_with_warmup,
)
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''', datefmt='''%m/%d/%Y %H:%M:%S''', level=logging.INFO
)
lowerCAmelCase__ = logging.getLogger(__name__)
def a__ ( SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : Optional[int] ):
'''simple docstring'''
lowerCAmelCase : Optional[Any] = np.argmax(SCREAMING_SNAKE_CASE , axis=1 )
return np.sum(outputs == labels )
def a__ ( SCREAMING_SNAKE_CASE : int ):
'''simple docstring'''
with open(SCREAMING_SNAKE_CASE , encoding="utf_8" ) as f:
lowerCAmelCase : Tuple = csv.reader(SCREAMING_SNAKE_CASE )
lowerCAmelCase : Tuple = []
next(SCREAMING_SNAKE_CASE ) # skip the first line
for line in tqdm(SCREAMING_SNAKE_CASE ):
output.append((" ".join(line[1:5] ), line[5], line[6], int(line[-1] ) - 1) )
return output
def a__ ( SCREAMING_SNAKE_CASE : Optional[int] , SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : List[str] , SCREAMING_SNAKE_CASE : str ):
'''simple docstring'''
lowerCAmelCase : List[Any] = []
for dataset in encoded_datasets:
lowerCAmelCase : Dict = len(SCREAMING_SNAKE_CASE )
lowerCAmelCase : str = np.zeros((n_batch, 2, input_len) , dtype=np.intaa )
lowerCAmelCase : Tuple = np.zeros((n_batch, 2) , dtype=np.intaa )
lowerCAmelCase : int = np.full((n_batch, 2, input_len) , fill_value=-1_0_0 , dtype=np.intaa )
lowerCAmelCase : List[Any] = np.zeros((n_batch,) , dtype=np.intaa )
for (
i,
(story, conta, conta, mc_label),
) in enumerate(SCREAMING_SNAKE_CASE ):
lowerCAmelCase : int = [start_token] + story[:cap_length] + [delimiter_token] + conta[:cap_length] + [clf_token]
lowerCAmelCase : Union[str, Any] = [start_token] + story[:cap_length] + [delimiter_token] + conta[:cap_length] + [clf_token]
lowerCAmelCase : Tuple = with_conta
lowerCAmelCase : Any = with_conta
lowerCAmelCase : Optional[Any] = len(SCREAMING_SNAKE_CASE ) - 1
lowerCAmelCase : Dict = len(SCREAMING_SNAKE_CASE ) - 1
lowerCAmelCase : Optional[Any] = with_conta
lowerCAmelCase : List[Any] = with_conta
lowerCAmelCase : str = mc_label
lowerCAmelCase : Dict = (input_ids, mc_token_ids, lm_labels, mc_labels)
tensor_datasets.append(tuple(torch.tensor(SCREAMING_SNAKE_CASE ) for t in all_inputs ) )
return tensor_datasets
def a__ ( ):
'''simple docstring'''
lowerCAmelCase : Optional[int] = argparse.ArgumentParser()
parser.add_argument("--model_name" , type=SCREAMING_SNAKE_CASE , default="openai-gpt" , help="pretrained model name" )
parser.add_argument("--do_train" , action="store_true" , help="Whether to run training." )
parser.add_argument("--do_eval" , action="store_true" , help="Whether to run eval on the dev set." )
parser.add_argument(
"--output_dir" , default=SCREAMING_SNAKE_CASE , type=SCREAMING_SNAKE_CASE , required=SCREAMING_SNAKE_CASE , help="The output directory where the model predictions and checkpoints will be written." , )
parser.add_argument("--train_dataset" , type=SCREAMING_SNAKE_CASE , default="" )
parser.add_argument("--eval_dataset" , type=SCREAMING_SNAKE_CASE , default="" )
parser.add_argument("--seed" , type=SCREAMING_SNAKE_CASE , default=4_2 )
parser.add_argument("--num_train_epochs" , type=SCREAMING_SNAKE_CASE , default=3 )
parser.add_argument("--train_batch_size" , type=SCREAMING_SNAKE_CASE , default=8 )
parser.add_argument("--eval_batch_size" , type=SCREAMING_SNAKE_CASE , default=1_6 )
parser.add_argument("--adam_epsilon" , default=1E-8 , type=SCREAMING_SNAKE_CASE , help="Epsilon for Adam optimizer." )
parser.add_argument("--max_grad_norm" , type=SCREAMING_SNAKE_CASE , default=1 )
parser.add_argument(
"--max_steps" , default=-1 , type=SCREAMING_SNAKE_CASE , help=(
"If > 0: set total number of training steps to perform. Override num_train_epochs."
) , )
parser.add_argument(
"--gradient_accumulation_steps" , type=SCREAMING_SNAKE_CASE , default=1 , help="Number of updates steps to accumulate before performing a backward/update pass." , )
parser.add_argument("--learning_rate" , type=SCREAMING_SNAKE_CASE , default=6.2_5E-5 )
parser.add_argument("--warmup_steps" , default=0 , type=SCREAMING_SNAKE_CASE , help="Linear warmup over warmup_steps." )
parser.add_argument("--lr_schedule" , type=SCREAMING_SNAKE_CASE , default="warmup_linear" )
parser.add_argument("--weight_decay" , type=SCREAMING_SNAKE_CASE , default=0.01 )
parser.add_argument("--lm_coef" , type=SCREAMING_SNAKE_CASE , default=0.9 )
parser.add_argument("--n_valid" , type=SCREAMING_SNAKE_CASE , default=3_7_4 )
parser.add_argument("--server_ip" , type=SCREAMING_SNAKE_CASE , default="" , help="Can be used for distant debugging." )
parser.add_argument("--server_port" , type=SCREAMING_SNAKE_CASE , default="" , help="Can be used for distant debugging." )
lowerCAmelCase : Tuple = parser.parse_args()
print(SCREAMING_SNAKE_CASE )
if args.server_ip and args.server_port:
# Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
import ptvsd
print("Waiting for debugger attach" )
ptvsd.enable_attach(address=(args.server_ip, args.server_port) , redirect_output=SCREAMING_SNAKE_CASE )
ptvsd.wait_for_attach()
random.seed(args.seed )
np.random.seed(args.seed )
torch.manual_seed(args.seed )
torch.cuda.manual_seed_all(args.seed )
lowerCAmelCase : Optional[int] = torch.device("cuda" if torch.cuda.is_available() else "cpu" )
lowerCAmelCase : Optional[int] = torch.cuda.device_count()
logger.info("device: {}, n_gpu {}".format(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) )
if not args.do_train and not args.do_eval:
raise ValueError("At least one of `do_train` or `do_eval` must be True." )
if not os.path.exists(args.output_dir ):
os.makedirs(args.output_dir )
# Load tokenizer and model
# This loading functions also add new tokens and embeddings called `special tokens`
# These new embeddings will be fine-tuned on the RocStories dataset
lowerCAmelCase : str = ["_start_", "_delimiter_", "_classify_"]
lowerCAmelCase : Dict = OpenAIGPTTokenizer.from_pretrained(args.model_name )
tokenizer.add_tokens(SCREAMING_SNAKE_CASE )
lowerCAmelCase : Dict = tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE )
lowerCAmelCase : Union[str, Any] = OpenAIGPTDoubleHeadsModel.from_pretrained(args.model_name )
model.resize_token_embeddings(len(SCREAMING_SNAKE_CASE ) )
model.to(SCREAMING_SNAKE_CASE )
# Load and encode the datasets
def tokenize_and_encode(SCREAMING_SNAKE_CASE : Optional[Any] ):
if isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
return tokenizer.convert_tokens_to_ids(tokenizer.tokenize(SCREAMING_SNAKE_CASE ) )
elif isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
return obj
return [tokenize_and_encode(SCREAMING_SNAKE_CASE ) for o in obj]
logger.info("Encoding dataset..." )
lowerCAmelCase : Optional[Any] = load_rocstories_dataset(args.train_dataset )
lowerCAmelCase : int = load_rocstories_dataset(args.eval_dataset )
lowerCAmelCase : Tuple = (train_dataset, eval_dataset)
lowerCAmelCase : Dict = tokenize_and_encode(SCREAMING_SNAKE_CASE )
# Compute the max input length for the Transformer
lowerCAmelCase : Any = model.config.n_positions // 2 - 2
lowerCAmelCase : int = max(
len(story[:max_length] ) + max(len(conta[:max_length] ) , len(conta[:max_length] ) ) + 3
for dataset in encoded_datasets
for story, conta, conta, _ in dataset )
lowerCAmelCase : Union[str, Any] = min(SCREAMING_SNAKE_CASE , model.config.n_positions ) # Max size of input for the pre-trained model
# Prepare inputs tensors and dataloaders
lowerCAmelCase : Any = pre_process_datasets(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , *SCREAMING_SNAKE_CASE )
lowerCAmelCase , lowerCAmelCase : Tuple = tensor_datasets[0], tensor_datasets[1]
lowerCAmelCase : List[str] = TensorDataset(*SCREAMING_SNAKE_CASE )
lowerCAmelCase : Union[str, Any] = RandomSampler(SCREAMING_SNAKE_CASE )
lowerCAmelCase : Tuple = DataLoader(SCREAMING_SNAKE_CASE , sampler=SCREAMING_SNAKE_CASE , batch_size=args.train_batch_size )
lowerCAmelCase : int = TensorDataset(*SCREAMING_SNAKE_CASE )
lowerCAmelCase : Dict = SequentialSampler(SCREAMING_SNAKE_CASE )
lowerCAmelCase : Tuple = DataLoader(SCREAMING_SNAKE_CASE , sampler=SCREAMING_SNAKE_CASE , batch_size=args.eval_batch_size )
# Prepare optimizer
if args.do_train:
if args.max_steps > 0:
lowerCAmelCase : int = args.max_steps
lowerCAmelCase : str = args.max_steps // (len(SCREAMING_SNAKE_CASE ) // args.gradient_accumulation_steps) + 1
else:
lowerCAmelCase : Union[str, Any] = len(SCREAMING_SNAKE_CASE ) // args.gradient_accumulation_steps * args.num_train_epochs
lowerCAmelCase : Dict = list(model.named_parameters() )
lowerCAmelCase : str = ["bias", "LayerNorm.bias", "LayerNorm.weight"]
lowerCAmelCase : Tuple = [
{
"params": [p for n, p in param_optimizer if not any(nd in n for nd in no_decay )],
"weight_decay": args.weight_decay,
},
{"params": [p for n, p in param_optimizer if any(nd in n for nd in no_decay )], "weight_decay": 0.0},
]
lowerCAmelCase : Tuple = AdamW(SCREAMING_SNAKE_CASE , lr=args.learning_rate , eps=args.adam_epsilon )
lowerCAmelCase : str = get_linear_schedule_with_warmup(
SCREAMING_SNAKE_CASE , num_warmup_steps=args.warmup_steps , num_training_steps=SCREAMING_SNAKE_CASE )
if args.do_train:
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase : Dict = 0, 0, None
model.train()
for _ in trange(int(args.num_train_epochs ) , desc="Epoch" ):
lowerCAmelCase : Optional[int] = 0
lowerCAmelCase : Union[str, Any] = 0
lowerCAmelCase : Tuple = tqdm(SCREAMING_SNAKE_CASE , desc="Training" )
for step, batch in enumerate(SCREAMING_SNAKE_CASE ):
lowerCAmelCase : Tuple = tuple(t.to(SCREAMING_SNAKE_CASE ) for t in batch )
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase : int = batch
lowerCAmelCase : Optional[int] = model(SCREAMING_SNAKE_CASE , mc_token_ids=SCREAMING_SNAKE_CASE , lm_labels=SCREAMING_SNAKE_CASE , mc_labels=SCREAMING_SNAKE_CASE )
lowerCAmelCase : int = args.lm_coef * losses[0] + losses[1]
loss.backward()
optimizer.step()
scheduler.step()
optimizer.zero_grad()
tr_loss += loss.item()
lowerCAmelCase : Optional[Any] = (
loss.item() if exp_average_loss is None else 0.7 * exp_average_loss + 0.3 * loss.item()
)
nb_tr_steps += 1
lowerCAmelCase : int = "Training loss: {:.2e} lr: {:.2e}".format(SCREAMING_SNAKE_CASE , scheduler.get_lr()[0] )
# Save a trained model
if args.do_train:
# Save a trained model, configuration and tokenizer
lowerCAmelCase : Optional[int] = model.module if hasattr(SCREAMING_SNAKE_CASE , "module" ) else model # Only save the model itself
# If we save using the predefined names, we can load using `from_pretrained`
lowerCAmelCase : Any = os.path.join(args.output_dir , SCREAMING_SNAKE_CASE )
lowerCAmelCase : Dict = os.path.join(args.output_dir , SCREAMING_SNAKE_CASE )
torch.save(model_to_save.state_dict() , SCREAMING_SNAKE_CASE )
model_to_save.config.to_json_file(SCREAMING_SNAKE_CASE )
tokenizer.save_vocabulary(args.output_dir )
# Load a trained model and vocabulary that you have fine-tuned
lowerCAmelCase : List[str] = OpenAIGPTDoubleHeadsModel.from_pretrained(args.output_dir )
lowerCAmelCase : Dict = OpenAIGPTTokenizer.from_pretrained(args.output_dir )
model.to(SCREAMING_SNAKE_CASE )
if args.do_eval:
model.eval()
lowerCAmelCase , lowerCAmelCase : Optional[int] = 0, 0
lowerCAmelCase , lowerCAmelCase : Any = 0, 0
for batch in tqdm(SCREAMING_SNAKE_CASE , desc="Evaluating" ):
lowerCAmelCase : List[Any] = tuple(t.to(SCREAMING_SNAKE_CASE ) for t in batch )
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase : Tuple = batch
with torch.no_grad():
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase : Optional[Any] = model(
SCREAMING_SNAKE_CASE , mc_token_ids=SCREAMING_SNAKE_CASE , lm_labels=SCREAMING_SNAKE_CASE , mc_labels=SCREAMING_SNAKE_CASE )
lowerCAmelCase : List[str] = mc_logits.detach().cpu().numpy()
lowerCAmelCase : List[str] = mc_labels.to("cpu" ).numpy()
lowerCAmelCase : Any = accuracy(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
eval_loss += mc_loss.mean().item()
eval_accuracy += tmp_eval_accuracy
nb_eval_examples += input_ids.size(0 )
nb_eval_steps += 1
lowerCAmelCase : List[Any] = eval_loss / nb_eval_steps
lowerCAmelCase : List[Any] = eval_accuracy / nb_eval_examples
lowerCAmelCase : Tuple = tr_loss / nb_tr_steps if args.do_train else None
lowerCAmelCase : Any = {"eval_loss": eval_loss, "eval_accuracy": eval_accuracy, "train_loss": train_loss}
lowerCAmelCase : List[str] = os.path.join(args.output_dir , "eval_results.txt" )
with open(SCREAMING_SNAKE_CASE , "w" ) as writer:
logger.info("***** Eval results *****" )
for key in sorted(result.keys() ):
logger.info(" %s = %s" , SCREAMING_SNAKE_CASE , str(result[key] ) )
writer.write("%s = %s\n" % (key, str(result[key] )) )
if __name__ == "__main__":
main()
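# Example invocation (illustrative; the dataset paths are placeholders, and the
# RocStories cloze CSV files must be obtained separately):
#
#     python run_openai_gpt.py \
#       --model_name openai-gpt \
#       --do_train \
#       --do_eval \
#       --train_dataset "$ROC_STORIES_DIR/<cloze_val_split>.csv" \
#       --eval_dataset "$ROC_STORIES_DIR/<cloze_test_split>.csv" \
#       --output_dir ../log \
#       --train_batch_size 16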
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, PNDMScheduler, StableDiffusionInpaintPipeline, UNetaDConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow
from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class SCREAMING_SNAKE_CASE__ ( lowercase , lowercase , lowercase , unittest.TestCase ):
"""simple docstring"""
a : Union[str, Any] =StableDiffusionInpaintPipeline
a : Dict =TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
a : str =TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
a : Any =frozenset(
[] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
a : Optional[Any] =frozenset([] )
def lowercase__ ( self ):
"""simple docstring"""
torch.manual_seed(0 )
lowerCAmelCase : Tuple = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=9 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=32 , attention_head_dim=(2, 4) , use_linear_projection=snake_case__ , )
lowerCAmelCase : Union[str, Any] = PNDMScheduler(skip_prk_steps=snake_case__ )
torch.manual_seed(0 )
lowerCAmelCase : Tuple = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , sample_size=128 , )
torch.manual_seed(0 )
lowerCAmelCase : Optional[int] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , hidden_act="gelu" , projection_dim=512 , )
lowerCAmelCase : int = CLIPTextModel(snake_case__ )
lowerCAmelCase : Union[str, Any] = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
lowerCAmelCase : List[str] = {
"unet": unet,
"scheduler": scheduler,
"vae": vae,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"safety_checker": None,
"feature_extractor": None,
}
return components
def lowercase__ ( self , snake_case__ , snake_case__=0 ):
"""simple docstring"""
lowerCAmelCase : int = floats_tensor((1, 3, 32, 32) , rng=random.Random(snake_case__ ) ).to(snake_case__ )
lowerCAmelCase : Optional[int] = image.cpu().permute(0 , 2 , 3 , 1 )[0]
lowerCAmelCase : str = Image.fromarray(np.uinta(snake_case__ ) ).convert("RGB" ).resize((64, 64) )
lowerCAmelCase : Dict = Image.fromarray(np.uinta(image + 4 ) ).convert("RGB" ).resize((64, 64) )
if str(snake_case__ ).startswith("mps" ):
lowerCAmelCase : Dict = torch.manual_seed(snake_case__ )
else:
lowerCAmelCase : Optional[Any] = torch.Generator(device=snake_case__ ).manual_seed(snake_case__ )
lowerCAmelCase : List[Any] = {
"prompt": "A painting of a squirrel eating a burger",
"image": init_image,
"mask_image": mask_image,
"generator": generator,
"num_inference_steps": 2,
"guidance_scale": 6.0,
"output_type": "numpy",
}
return inputs
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : int = "cpu" # ensure determinism for the device-dependent torch.Generator
lowerCAmelCase : List[Any] = self.get_dummy_components()
lowerCAmelCase : Tuple = StableDiffusionInpaintPipeline(**snake_case__ )
lowerCAmelCase : Optional[Any] = sd_pipe.to(snake_case__ )
sd_pipe.set_progress_bar_config(disable=snake_case__ )
lowerCAmelCase : str = self.get_dummy_inputs(snake_case__ )
lowerCAmelCase : Union[str, Any] = sd_pipe(**snake_case__ ).images
lowerCAmelCase : Optional[int] = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
lowerCAmelCase : Optional[Any] = np.array([0.4727, 0.5735, 0.3941, 0.5446, 0.5926, 0.4394, 0.5062, 0.4654, 0.4476] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def lowercase__ ( self ):
"""simple docstring"""
super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
@slow
@require_torch_gpu
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
"""simple docstring"""
def lowercase__ ( self ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : Union[str, Any] = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/sd2-inpaint/init_image.png" )
lowerCAmelCase : str = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png" )
lowerCAmelCase : int = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint"
"/yellow_cat_sitting_on_a_park_bench.npy" )
lowerCAmelCase : Tuple = "stabilityai/stable-diffusion-2-inpainting"
lowerCAmelCase : Optional[int] = StableDiffusionInpaintPipeline.from_pretrained(snake_case__ , safety_checker=snake_case__ )
pipe.to(snake_case__ )
pipe.set_progress_bar_config(disable=snake_case__ )
pipe.enable_attention_slicing()
lowerCAmelCase : Tuple = "Face of a yellow cat, high resolution, sitting on a park bench"
lowerCAmelCase : Dict = torch.manual_seed(0 )
lowerCAmelCase : str = pipe(
prompt=snake_case__ , image=snake_case__ , mask_image=snake_case__ , generator=snake_case__ , output_type="np" , )
lowerCAmelCase : Tuple = output.images[0]
assert image.shape == (512, 512, 3)
assert np.abs(expected_image - image ).max() < 9e-3
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : List[Any] = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/sd2-inpaint/init_image.png" )
lowerCAmelCase : Dict = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png" )
lowerCAmelCase : List[str] = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint"
"/yellow_cat_sitting_on_a_park_bench_fp16.npy" )
lowerCAmelCase : Dict = "stabilityai/stable-diffusion-2-inpainting"
lowerCAmelCase : Union[str, Any] = StableDiffusionInpaintPipeline.from_pretrained(
snake_case__ , torch_dtype=torch.floataa , safety_checker=snake_case__ , )
pipe.to(snake_case__ )
pipe.set_progress_bar_config(disable=snake_case__ )
pipe.enable_attention_slicing()
lowerCAmelCase : str = "Face of a yellow cat, high resolution, sitting on a park bench"
lowerCAmelCase : Any = torch.manual_seed(0 )
lowerCAmelCase : Tuple = pipe(
prompt=snake_case__ , image=snake_case__ , mask_image=snake_case__ , generator=snake_case__ , output_type="np" , )
lowerCAmelCase : List[str] = output.images[0]
assert image.shape == (512, 512, 3)
assert np.abs(expected_image - image ).max() < 5e-1
def lowercase__ ( self ):
"""simple docstring"""
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
lowerCAmelCase : Dict = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/sd2-inpaint/init_image.png" )
lowerCAmelCase : Tuple = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png" )
lowerCAmelCase : List[str] = "stabilityai/stable-diffusion-2-inpainting"
lowerCAmelCase : Tuple = PNDMScheduler.from_pretrained(snake_case__ , subfolder="scheduler" )
lowerCAmelCase : Tuple = StableDiffusionInpaintPipeline.from_pretrained(
snake_case__ , safety_checker=snake_case__ , scheduler=snake_case__ , torch_dtype=torch.floataa , )
pipe.to(snake_case__ )
pipe.set_progress_bar_config(disable=snake_case__ )
pipe.enable_attention_slicing(1 )
pipe.enable_sequential_cpu_offload()
lowerCAmelCase : Optional[int] = "Face of a yellow cat, high resolution, sitting on a park bench"
lowerCAmelCase : Optional[Any] = torch.manual_seed(0 )
lowerCAmelCase : int = pipe(
prompt=snake_case__ , image=snake_case__ , mask_image=snake_case__ , generator=snake_case__ , num_inference_steps=2 , output_type="np" , )
lowerCAmelCase : Tuple = torch.cuda.max_memory_allocated()
# make sure that less than 2.65 GB is allocated
assert mem_bytes < 2.65 * 10**9
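# To run only the fast (CPU) tests above, something like the following works in a
# typical diffusers checkout (illustrative command; the file path is assumed):
#
#     pytest tests/pipelines/stable_diffusion_2/test_stable_diffusion_inpaint.py -k "FastTests"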
"""simple docstring"""
from typing import List, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {
'''huggingface/informer-tourism-monthly''': (
'''https://huggingface.co/huggingface/informer-tourism-monthly/resolve/main/config.json'''
),
# See all Informer models at https://huggingface.co/models?filter=informer
}
class SCREAMING_SNAKE_CASE__ ( lowercase ):
"""simple docstring"""
a : Optional[Any] ="informer"
a : int ={
"hidden_size": "d_model",
"num_attention_heads": "encoder_attention_heads",
"num_hidden_layers": "encoder_layers",
}
def __init__( self , snake_case__ = None , snake_case__ = None , snake_case__ = "student_t" , snake_case__ = "nll" , snake_case__ = 1 , snake_case__ = None , snake_case__ = "mean" , snake_case__ = 0 , snake_case__ = 0 , snake_case__ = 0 , snake_case__ = 0 , snake_case__ = None , snake_case__ = None , snake_case__ = 64 , snake_case__ = 32 , snake_case__ = 32 , snake_case__ = 2 , snake_case__ = 2 , snake_case__ = 2 , snake_case__ = 2 , snake_case__ = True , snake_case__ = "gelu" , snake_case__ = 0.05 , snake_case__ = 0.1 , snake_case__ = 0.1 , snake_case__ = 0.1 , snake_case__ = 0.1 , snake_case__ = 100 , snake_case__ = 0.02 , snake_case__=True , snake_case__ = "prob" , snake_case__ = 5 , snake_case__ = True , **snake_case__ , ):
"""simple docstring"""
lowerCAmelCase : Union[str, Any] = prediction_length
lowerCAmelCase : Union[str, Any] = context_length or prediction_length
lowerCAmelCase : List[Any] = distribution_output
lowerCAmelCase : Optional[int] = loss
lowerCAmelCase : Optional[int] = input_size
lowerCAmelCase : str = num_time_features
lowerCAmelCase : Any = lags_sequence if lags_sequence is not None else [1, 2, 3, 4, 5, 6, 7]
lowerCAmelCase : Dict = scaling
lowerCAmelCase : List[str] = num_dynamic_real_features
lowerCAmelCase : Dict = num_static_real_features
lowerCAmelCase : Dict = num_static_categorical_features
# set cardinality
if cardinality and num_static_categorical_features > 0:
if len(snake_case__ ) != num_static_categorical_features:
raise ValueError(
"The cardinality should be a list of the same length as `num_static_categorical_features`" )
lowerCAmelCase : List[str] = cardinality
else:
lowerCAmelCase : Optional[Any] = [0]
# set embedding_dimension
if embedding_dimension and num_static_categorical_features > 0:
if len(snake_case__ ) != num_static_categorical_features:
raise ValueError(
"The embedding dimension should be a list of the same length as `num_static_categorical_features`" )
lowerCAmelCase : List[Any] = embedding_dimension
else:
lowerCAmelCase : Dict = [min(50 , (cat + 1) // 2 ) for cat in self.cardinality]
lowerCAmelCase : List[Any] = num_parallel_samples
# Transformer architecture configuration
lowerCAmelCase : Any = input_size * len(self.lags_sequence ) + self._number_of_features
lowerCAmelCase : str = d_model
lowerCAmelCase : List[str] = encoder_attention_heads
lowerCAmelCase : int = decoder_attention_heads
lowerCAmelCase : Optional[Any] = encoder_ffn_dim
lowerCAmelCase : Dict = decoder_ffn_dim
lowerCAmelCase : int = encoder_layers
lowerCAmelCase : Union[str, Any] = decoder_layers
lowerCAmelCase : Tuple = dropout
lowerCAmelCase : List[Any] = attention_dropout
lowerCAmelCase : int = activation_dropout
lowerCAmelCase : Union[str, Any] = encoder_layerdrop
lowerCAmelCase : int = decoder_layerdrop
lowerCAmelCase : Optional[int] = activation_function
lowerCAmelCase : int = init_std
lowerCAmelCase : Optional[Any] = use_cache
# Informer
lowerCAmelCase : Dict = attention_type
lowerCAmelCase : Any = sampling_factor
lowerCAmelCase : Optional[int] = distil
super().__init__(is_encoder_decoder=snake_case__ , **snake_case__ )
@property
def lowercase__ ( self ):
"""simple docstring"""
return (
sum(self.embedding_dimension )
+ self.num_dynamic_real_features
+ self.num_time_features
+ self.num_static_real_features
+ self.input_size * 2 # the log1p(abs(loc)) and log(scale) features
)
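# A minimal construction sketch (added for illustration; the values are arbitrary):
# a config for a series predicted 24 steps ahead from 48 steps of context, with one
# time feature such as month-of-year.
if __name__ == "__main__":
    config = InformerConfig(prediction_length=24, context_length=48, num_time_features=1)
    print(config.d_model, config.attention_type)  # 64 prob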
"""simple docstring"""
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_torch_available
from ...utils import OptionalDependencyNotAvailable
lowerCAmelCase__ = {
'''configuration_gpt_neox_japanese''': ['''GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''GPTNeoXJapaneseConfig'''],
'''tokenization_gpt_neox_japanese''': ['''GPTNeoXJapaneseTokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = [
'''GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''GPTNeoXJapaneseForCausalLM''',
'''GPTNeoXJapaneseLayer''',
'''GPTNeoXJapaneseModel''',
'''GPTNeoXJapanesePreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_gpt_neox_japanese import GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoXJapaneseConfig
from .tokenization_gpt_neox_japanese import GPTNeoXJapaneseTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neox_japanese import (
GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoXJapaneseForCausalLM,
GPTNeoXJapaneseLayer,
GPTNeoXJapaneseModel,
GPTNeoXJapanesePreTrainedModel,
)
else:
import sys
lowerCAmelCase__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
"""simple docstring"""
def a__ ( SCREAMING_SNAKE_CASE : int ):
'''simple docstring'''
if num < 0:
return False
lowerCAmelCase : int = num
lowerCAmelCase : int = 0
while num > 0:
lowerCAmelCase : Dict = rev_num * 1_0 + (num % 1_0)
num //= 1_0
return num_copy == rev_num
if __name__ == "__main__":
import doctest
doctest.testmod()
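# Worked example (added for illustration): reversing 10201 digit by digit builds
# 1, 10, 102, 1020, 10201 and reproduces the original number, so it is a palindrome.
assert is_palindrome(10201) is True
assert is_palindrome(123) is False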
"""simple docstring"""
import heapq
def a__ ( SCREAMING_SNAKE_CASE : dict ):
'''simple docstring'''
lowerCAmelCase : list[list] = []
# for each node and his adjacency list add them and the rank of the node to queue
# using heapq module the queue will be filled like a Priority Queue
# heapq works with a min priority queue, so I used -1*len(v) to build it
for key, value in graph.items():
# O(log(n))
heapq.heappush(SCREAMING_SNAKE_CASE , [-1 * len(SCREAMING_SNAKE_CASE ), (key, value)] )
# chosen_vertices = set of chosen vertices
lowerCAmelCase : Optional[int] = set()
# while queue isn't empty and there are still edges
# (queue[0][0] is the rank of the node with max rank)
while queue and queue[0][0] != 0:
# extract vertex with max rank from queue and add it to chosen_vertices
lowerCAmelCase : Optional[Any] = heapq.heappop(SCREAMING_SNAKE_CASE )[1][0]
chosen_vertices.add(SCREAMING_SNAKE_CASE )
# Remove all arcs adjacent to argmax
for elem in queue:
# if v haven't adjacent node, skip
if elem[0] == 0:
continue
# if argmax is reachable from elem
# remove argmax from elem's adjacent list and update his rank
if argmax in elem[1][1]:
lowerCAmelCase : List[Any] = elem[1][1].index(SCREAMING_SNAKE_CASE )
del elem[1][1][index]
elem[0] += 1
# re-order the queue
heapq.heapify(SCREAMING_SNAKE_CASE )
return chosen_vertices
if __name__ == "__main__":
import doctest
doctest.testmod()
lowerCAmelCase__ = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
print(F"Minimum vertex cover:\n{greedy_min_vertex_cover(graph)}")
"""simple docstring"""
import os
import re
import shutil
import sys
import tempfile
import unittest
import black
lowerCAmelCase__ = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, '''utils'''))
import check_copies # noqa: E402
# This is the reference code that will be used in the tests.
# If DDPMSchedulerOutput is changed in scheduling_ddpm.py, this code needs to be manually updated.
lowerCAmelCase__ = ''' \"""
Output class for the scheduler\'s step function output.
Args:
prev_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):
Computed sample (x_{t-1}) of previous timestep. `prev_sample` should be used as next model input in the
denoising loop.
pred_original_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):
The predicted denoised sample (x_{0}) based on the model output from the current timestep.
`pred_original_sample` can be used to preview progress or for guidance.
\"""
prev_sample: torch.FloatTensor
pred_original_sample: Optional[torch.FloatTensor] = None
'''
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
"""simple docstring"""
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : int = tempfile.mkdtemp()
os.makedirs(os.path.join(self.diffusers_dir , "schedulers/" ) )
lowerCAmelCase : List[str] = self.diffusers_dir
shutil.copy(
os.path.join(snake_case__ , "src/diffusers/schedulers/scheduling_ddpm.py" ) , os.path.join(self.diffusers_dir , "schedulers/scheduling_ddpm.py" ) , )
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : Dict = "src/diffusers"
shutil.rmtree(self.diffusers_dir )
def lowercase__ ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__=None ):
"""simple docstring"""
lowerCAmelCase : Union[str, Any] = comment + f"""\nclass {class_name}(nn.Module):\n""" + class_code
if overwrite_result is not None:
lowerCAmelCase : str = comment + f"""\nclass {class_name}(nn.Module):\n""" + overwrite_result
lowerCAmelCase : int = black.Mode(target_versions={black.TargetVersion.PYaa} , line_length=119 )
lowerCAmelCase : int = black.format_str(snake_case__ , mode=snake_case__ )
lowerCAmelCase : Dict = os.path.join(self.diffusers_dir , "new_code.py" )
with open(snake_case__ , "w" , newline="\n" ) as f:
f.write(snake_case__ )
if overwrite_result is None:
self.assertTrue(len(check_copies.is_copy_consistent(snake_case__ ) ) == 0 )
else:
check_copies.is_copy_consistent(f.name , overwrite=snake_case__ )
with open(snake_case__ , "r" ) as f:
self.assertTrue(f.read() , snake_case__ )
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : List[str] = check_copies.find_code_in_diffusers("schedulers.scheduling_ddpm.DDPMSchedulerOutput" )
self.assertEqual(snake_case__ , snake_case__ )
def lowercase__ ( self ):
"""simple docstring"""
self.check_copy_consistency(
"# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput" , "DDPMSchedulerOutput" , REFERENCE_CODE + "\n" , )
# With no empty line at the end
self.check_copy_consistency(
"# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput" , "DDPMSchedulerOutput" , snake_case__ , )
# Copy consistency with rename
self.check_copy_consistency(
"# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test" , "TestSchedulerOutput" , re.sub("DDPM" , "Test" , snake_case__ ) , )
# Copy consistency with a really long name
lowerCAmelCase : Union[str, Any] = "TestClassWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason"
self.check_copy_consistency(
f"""# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->{long_class_name}""" , f"""{long_class_name}SchedulerOutput""" , re.sub("Bert" , snake_case__ , snake_case__ ) , )
# Copy consistency with overwrite
self.check_copy_consistency(
"# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test" , "TestSchedulerOutput" , snake_case__ , overwrite_result=re.sub("DDPM" , "Test" , snake_case__ ) , )
"""simple docstring"""
from typing import Optional, Tuple, Union
import flax
import flax.linen as nn
import jax
import jax.numpy as jnp
from flax.core.frozen_dict import FrozenDict
from ..configuration_utils import ConfigMixin, flax_register_to_config
from ..utils import BaseOutput
from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps
from .modeling_flax_utils import FlaxModelMixin
from .unet_ad_blocks_flax import (
FlaxCrossAttnDownBlockaD,
FlaxDownBlockaD,
FlaxUNetMidBlockaDCrossAttn,
)
@flax.struct.dataclass
class SCREAMING_SNAKE_CASE__ ( lowercase ):
"""simple docstring"""
a : jnp.ndarray
a : jnp.ndarray
class SCREAMING_SNAKE_CASE__ ( nn.Module ):
"""simple docstring"""
a : int
a : Tuple[int] =(16, 32, 96, 2_56)
a : jnp.dtype =jnp.floataa
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : int = nn.Conv(
self.block_out_channels[0] , kernel_size=(3, 3) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
lowerCAmelCase : str = []
for i in range(len(self.block_out_channels ) - 1 ):
lowerCAmelCase : Dict = self.block_out_channels[i]
lowerCAmelCase : Union[str, Any] = self.block_out_channels[i + 1]
lowerCAmelCase : Union[str, Any] = nn.Conv(
snake_case__ , kernel_size=(3, 3) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
blocks.append(snake_case__ )
lowerCAmelCase : Tuple = nn.Conv(
snake_case__ , kernel_size=(3, 3) , strides=(2, 2) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
blocks.append(snake_case__ )
lowerCAmelCase : Optional[Any] = blocks
lowerCAmelCase : Union[str, Any] = nn.Conv(
self.conditioning_embedding_channels , kernel_size=(3, 3) , padding=((1, 1), (1, 1)) , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
def __call__( self , snake_case__ ):
"""simple docstring"""
lowerCAmelCase : Tuple = self.conv_in(snake_case__ )
lowerCAmelCase : int = nn.silu(snake_case__ )
for block in self.blocks:
lowerCAmelCase : Dict = block(snake_case__ )
lowerCAmelCase : List[str] = nn.silu(snake_case__ )
lowerCAmelCase : int = self.conv_out(snake_case__ )
return embedding
@flax_register_to_config
class SCREAMING_SNAKE_CASE__ ( nn.Module , lowercase , lowercase ):
"""simple docstring"""
a : int =32
a : int =4
a : Tuple[str] =(
"CrossAttnDownBlock2D",
"CrossAttnDownBlock2D",
"CrossAttnDownBlock2D",
"DownBlock2D",
)
a : Union[bool, Tuple[bool]] =False
a : Tuple[int] =(3_20, 6_40, 12_80, 12_80)
a : int =2
a : Union[int, Tuple[int]] =8
a : Optional[Union[int, Tuple[int]]] =None
a : int =12_80
a : float =0.0
a : bool =False
a : jnp.dtype =jnp.floataa
a : bool =True
a : int =0
a : str ="rgb"
a : Tuple[int] =(16, 32, 96, 2_56)
def lowercase__ ( self , snake_case__ ):
"""simple docstring"""
lowerCAmelCase : Tuple = (1, self.in_channels, self.sample_size, self.sample_size)
lowerCAmelCase : List[str] = jnp.zeros(snake_case__ , dtype=jnp.floataa )
lowerCAmelCase : Optional[int] = jnp.ones((1,) , dtype=jnp.intaa )
lowerCAmelCase : int = jnp.zeros((1, 1, self.cross_attention_dim) , dtype=jnp.floataa )
lowerCAmelCase : str = (1, 3, self.sample_size * 8, self.sample_size * 8)
lowerCAmelCase : Dict = jnp.zeros(snake_case__ , dtype=jnp.floataa )
lowerCAmelCase , lowerCAmelCase : Optional[int] = jax.random.split(snake_case__ )
lowerCAmelCase : int = {"params": params_rng, "dropout": dropout_rng}
return self.init(snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ )["params"]
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : List[str] = self.block_out_channels
lowerCAmelCase : List[str] = block_out_channels[0] * 4
# If `num_attention_heads` is not defined (which is the case for most models)
# it will default to `attention_head_dim`. This looks weird upon first reading it and it is.
# The reason for this behavior is to correct for incorrectly named variables that were introduced
# when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131
# Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking
# which is why we correct for the naming here.
lowerCAmelCase : Dict = self.num_attention_heads or self.attention_head_dim
# input
lowerCAmelCase : List[str] = nn.Conv(
block_out_channels[0] , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
# time
lowerCAmelCase : Union[str, Any] = FlaxTimesteps(
block_out_channels[0] , flip_sin_to_cos=self.flip_sin_to_cos , freq_shift=self.config.freq_shift )
lowerCAmelCase : List[Any] = FlaxTimestepEmbedding(snake_case__ , dtype=self.dtype )
lowerCAmelCase : List[Any] = FlaxControlNetConditioningEmbedding(
conditioning_embedding_channels=block_out_channels[0] , block_out_channels=self.conditioning_embedding_out_channels , )
lowerCAmelCase : str = self.only_cross_attention
if isinstance(snake_case__ , snake_case__ ):
lowerCAmelCase : Optional[int] = (only_cross_attention,) * len(self.down_block_types )
if isinstance(snake_case__ , snake_case__ ):
lowerCAmelCase : Tuple = (num_attention_heads,) * len(self.down_block_types )
# down
lowerCAmelCase : Optional[Any] = []
lowerCAmelCase : Tuple = []
lowerCAmelCase : Tuple = block_out_channels[0]
lowerCAmelCase : Optional[Any] = nn.Conv(
snake_case__ , kernel_size=(1, 1) , padding="VALID" , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
controlnet_down_blocks.append(snake_case__ )
for i, down_block_type in enumerate(self.down_block_types ):
lowerCAmelCase : Any = output_channel
lowerCAmelCase : Dict = block_out_channels[i]
lowerCAmelCase : List[Any] = i == len(snake_case__ ) - 1
if down_block_type == "CrossAttnDownBlock2D":
lowerCAmelCase : str = FlaxCrossAttnDownBlockaD(
in_channels=snake_case__ , out_channels=snake_case__ , dropout=self.dropout , num_layers=self.layers_per_block , num_attention_heads=num_attention_heads[i] , add_downsample=not is_final_block , use_linear_projection=self.use_linear_projection , only_cross_attention=only_cross_attention[i] , dtype=self.dtype , )
else:
lowerCAmelCase : Optional[Any] = FlaxDownBlockaD(
in_channels=snake_case__ , out_channels=snake_case__ , dropout=self.dropout , num_layers=self.layers_per_block , add_downsample=not is_final_block , dtype=self.dtype , )
down_blocks.append(snake_case__ )
for _ in range(self.layers_per_block ):
lowerCAmelCase : Dict = nn.Conv(
snake_case__ , kernel_size=(1, 1) , padding="VALID" , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
controlnet_down_blocks.append(snake_case__ )
if not is_final_block:
lowerCAmelCase : int = nn.Conv(
snake_case__ , kernel_size=(1, 1) , padding="VALID" , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
controlnet_down_blocks.append(snake_case__ )
lowerCAmelCase : Tuple = down_blocks
lowerCAmelCase : List[str] = controlnet_down_blocks
# mid
lowerCAmelCase : List[str] = block_out_channels[-1]
lowerCAmelCase : Optional[int] = FlaxUNetMidBlockaDCrossAttn(
in_channels=snake_case__ , dropout=self.dropout , num_attention_heads=num_attention_heads[-1] , use_linear_projection=self.use_linear_projection , dtype=self.dtype , )
lowerCAmelCase : List[Any] = nn.Conv(
snake_case__ , kernel_size=(1, 1) , padding="VALID" , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
def __call__( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ = 1.0 , snake_case__ = True , snake_case__ = False , ):
"""simple docstring"""
lowerCAmelCase : Dict = self.controlnet_conditioning_channel_order
if channel_order == "bgr":
lowerCAmelCase : Optional[int] = jnp.flip(snake_case__ , axis=1 )
# 1. time
if not isinstance(snake_case__ , jnp.ndarray ):
lowerCAmelCase : Optional[int] = jnp.array([timesteps] , dtype=jnp.intaa )
elif isinstance(snake_case__ , jnp.ndarray ) and len(timesteps.shape ) == 0:
lowerCAmelCase : Optional[int] = timesteps.astype(dtype=jnp.floataa )
lowerCAmelCase : List[Any] = jnp.expand_dims(snake_case__ , 0 )
lowerCAmelCase : Optional[int] = self.time_proj(snake_case__ )
lowerCAmelCase : Dict = self.time_embedding(snake_case__ )
# 2. pre-process
lowerCAmelCase : Dict = jnp.transpose(snake_case__ , (0, 2, 3, 1) )
lowerCAmelCase : Optional[int] = self.conv_in(snake_case__ )
lowerCAmelCase : Union[str, Any] = jnp.transpose(snake_case__ , (0, 2, 3, 1) )
lowerCAmelCase : Any = self.controlnet_cond_embedding(snake_case__ )
sample += controlnet_cond
# 3. down
lowerCAmelCase : Optional[int] = (sample,)
for down_block in self.down_blocks:
if isinstance(snake_case__ , snake_case__ ):
lowerCAmelCase , lowerCAmelCase : Dict = down_block(snake_case__ , snake_case__ , snake_case__ , deterministic=not train )
else:
lowerCAmelCase , lowerCAmelCase : Union[str, Any] = down_block(snake_case__ , snake_case__ , deterministic=not train )
down_block_res_samples += res_samples
# 4. mid
lowerCAmelCase : List[Any] = self.mid_block(snake_case__ , snake_case__ , snake_case__ , deterministic=not train )
# 5. contronet blocks
lowerCAmelCase : Tuple = ()
for down_block_res_sample, controlnet_block in zip(snake_case__ , self.controlnet_down_blocks ):
lowerCAmelCase : List[str] = controlnet_block(snake_case__ )
controlnet_down_block_res_samples += (down_block_res_sample,)
lowerCAmelCase : Dict = controlnet_down_block_res_samples
lowerCAmelCase : Tuple = self.controlnet_mid_block(snake_case__ )
# 6. scaling
lowerCAmelCase : Optional[int] = [sample * conditioning_scale for sample in down_block_res_samples]
mid_block_res_sample *= conditioning_scale
if not return_dict:
return (down_block_res_samples, mid_block_res_sample)
return FlaxControlNetOutput(
down_block_res_samples=snake_case__ , mid_block_res_sample=snake_case__ )
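# A minimal sketch (added for illustration, separate from the model above) of why the
# ControlNet projection convs use zero initialisation: a zero-initialized conv
# contributes nothing at step 0, so training starts from unmodified UNet behaviour.
if __name__ == "__main__":
    zero_conv = nn.Conv(
        4,
        kernel_size=(1, 1),
        kernel_init=nn.initializers.zeros_init(),
        bias_init=nn.initializers.zeros_init(),
    )
    x = jnp.ones((1, 8, 8, 4))
    variables = zero_conv.init(jax.random.PRNGKey(0), x)
    assert jnp.allclose(zero_conv.apply(variables, x), 0.0)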
"""simple docstring"""
import re
from flax.core.frozen_dict import freeze
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.experimental import PartitionSpec as P
# Sentinels
lowerCAmelCase__ = object()
# For specifying empty leaf dict `{}`
lowerCAmelCase__ = object()
def a__ ( SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : Optional[Any] ):
'''simple docstring'''
lowerCAmelCase : Optional[int] = tuple((re.compile(x + "$" ) for x in qs) )
for i in range(len(SCREAMING_SNAKE_CASE ) - len(SCREAMING_SNAKE_CASE ) + 1 ):
lowerCAmelCase : int = [x.match(SCREAMING_SNAKE_CASE ) for x, y in zip(SCREAMING_SNAKE_CASE , ks[i:] )]
if matches and all(SCREAMING_SNAKE_CASE ):
return True
return False
def a__ ( SCREAMING_SNAKE_CASE : int ):
'''simple docstring'''
def replace(SCREAMING_SNAKE_CASE : Tuple , SCREAMING_SNAKE_CASE : Dict ):
for rule, replacement in rules:
if _match(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
return replacement
return val
return replace
def a__ ( ):
'''simple docstring'''
return [
# embeddings
(("transformer", "wpe", "embedding"), P("mp" , SCREAMING_SNAKE_CASE )),
(("transformer", "wte", "embedding"), P("mp" , SCREAMING_SNAKE_CASE )),
# atention
(("attention", "(q_proj|k_proj|v_proj)", "kernel"), P(SCREAMING_SNAKE_CASE , "mp" )),
(("attention", "out_proj", "kernel"), P("mp" , SCREAMING_SNAKE_CASE )),
(("attention", "out_proj", "bias"), None),
# mlp
(("mlp", "c_fc", "kernel"), P(SCREAMING_SNAKE_CASE , "mp" )),
(("mlp", "c_fc", "bias"), P("mp" )),
(("mlp", "c_proj", "kernel"), P("mp" , SCREAMING_SNAKE_CASE )),
(("mlp", "c_proj", "bias"), None),
# layer norms
((r"ln_\d+", "bias"), None),
((r"\d+", r"ln_\d+", "scale"), None),
(("ln_f", "bias"), None),
(("ln_f", "scale"), None),
]
def a__ ( SCREAMING_SNAKE_CASE : Union[str, Any] ):
'''simple docstring'''
lowerCAmelCase : Any = _get_partition_rules()
lowerCAmelCase : Tuple = _replacement_rules(SCREAMING_SNAKE_CASE )
lowerCAmelCase : List[str] = {k: _unmatched for k in flatten_dict(SCREAMING_SNAKE_CASE )}
lowerCAmelCase : List[Any] = {k: replace(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) for k, v in initd.items()}
assert _unmatched not in result.values(), "Incomplete partition spec."
return freeze(unflatten_dict(SCREAMING_SNAKE_CASE ) )
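
# Minimal standalone sketch of the sliding regex match implemented above
# (`re` is already imported at the top of this file). Keys are tuples of
# strings as produced by flatten_dict; each query element is anchored with "$"
# and the query window slides over the key tuple.
def _match_sketch(qs, ks):
    qts = tuple(re.compile(x + "$") for x in qs)
    for i in range(len(ks) - len(qts) + 1):
        matches = [x.match(y) for x, y in zip(qts, ks[i:])]
        if matches and all(matches):
            return True
    return False

assert _match_sketch(("mlp", "c_fc", "kernel"), ("transformer", "h", "0", "mlp", "c_fc", "kernel"))
assert not _match_sketch(("attention", "out_proj", "kernel"), ("transformer", "wte", "embedding"))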
| 681 | 1 |
"""simple docstring"""
# DISCLAIMER: This file is strongly influenced by https://github.com/ermongroup/ddim
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import flax
import jax
import jax.numpy as jnp
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils_flax import (
CommonSchedulerState,
FlaxKarrasDiffusionSchedulers,
FlaxSchedulerMixin,
FlaxSchedulerOutput,
add_noise_common,
get_velocity_common,
)
@flax.struct.dataclass
class SCREAMING_SNAKE_CASE__ :
"""simple docstring"""
a : CommonSchedulerState
# settable values
a : jnp.ndarray
a : jnp.ndarray
a : Optional[int] =None
@classmethod
def lowercase__ ( cls , snake_case__ , snake_case__ , snake_case__ ):
"""simple docstring"""
return cls(common=snake_case__ , init_noise_sigma=snake_case__ , timesteps=snake_case__ )
@dataclass
class SCREAMING_SNAKE_CASE__ ( lowercase ):
"""simple docstring"""
a : DDPMSchedulerState
class SCREAMING_SNAKE_CASE__ ( lowercase , lowercase ):
"""simple docstring"""
a : Union[str, Any] =[e.name for e in FlaxKarrasDiffusionSchedulers]
a : jnp.dtype
@property
def lowercase__ ( self ):
"""simple docstring"""
return True
@register_to_config
def __init__( self , snake_case__ = 1_000 , snake_case__ = 0.0001 , snake_case__ = 0.02 , snake_case__ = "linear" , snake_case__ = None , snake_case__ = "fixed_small" , snake_case__ = True , snake_case__ = "epsilon" , snake_case__ = jnp.floataa , ):
"""simple docstring"""
lowerCAmelCase : Any = dtype
def lowercase__ ( self , snake_case__ = None ):
"""simple docstring"""
if common is None:
lowerCAmelCase : Dict = CommonSchedulerState.create(self )
# standard deviation of the initial noise distribution
lowerCAmelCase : str = jnp.array(1.0 , dtype=self.dtype )
lowerCAmelCase : Any = jnp.arange(0 , self.config.num_train_timesteps ).round()[::-1]
return DDPMSchedulerState.create(
common=snake_case__ , init_noise_sigma=snake_case__ , timesteps=snake_case__ , )
def lowercase__ ( self , snake_case__ , snake_case__ , snake_case__ = None ):
"""simple docstring"""
return sample
def lowercase__ ( self , snake_case__ , snake_case__ , snake_case__ = () ):
"""simple docstring"""
lowerCAmelCase : List[Any] = self.config.num_train_timesteps // num_inference_steps
# creates integer timesteps by multiplying by ratio
# rounding to avoid issues when num_inference_steps is a power of 3
lowerCAmelCase : Any = (jnp.arange(0 , snake_case__ ) * step_ratio).round()[::-1]
return state.replace(
num_inference_steps=snake_case__ , timesteps=snake_case__ , )
def lowercase__ ( self , snake_case__ , snake_case__ , snake_case__=None , snake_case__=None ):
"""simple docstring"""
lowerCAmelCase : Optional[Any] = state.common.alphas_cumprod[t]
lowerCAmelCase : List[str] = jnp.where(t > 0 , state.common.alphas_cumprod[t - 1] , jnp.array(1.0 , dtype=self.dtype ) )
# For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf)
# and sample from it to get previous sample
# x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample
lowerCAmelCase : Union[str, Any] = (1 - alpha_prod_t_prev) / (1 - alpha_prod_t) * state.common.betas[t]
if variance_type is None:
lowerCAmelCase : List[str] = self.config.variance_type
# hacks - were probably added for training stability
if variance_type == "fixed_small":
lowerCAmelCase : List[Any] = jnp.clip(snake_case__ , a_min=1e-20 )
# for rl-diffuser https://arxiv.org/abs/2205.09991
elif variance_type == "fixed_small_log":
lowerCAmelCase : List[str] = jnp.log(jnp.clip(snake_case__ , a_min=1e-20 ) )
elif variance_type == "fixed_large":
lowerCAmelCase : Optional[int] = state.common.betas[t]
elif variance_type == "fixed_large_log":
# Glide max_log
lowerCAmelCase : List[str] = jnp.log(state.common.betas[t] )
elif variance_type == "learned":
return predicted_variance
elif variance_type == "learned_range":
lowerCAmelCase : List[str] = variance
lowerCAmelCase : Dict = state.common.betas[t]
lowerCAmelCase : Optional[Any] = (predicted_variance + 1) / 2
lowerCAmelCase : List[str] = frac * max_log + (1 - frac) * min_log
return variance
def lowercase__ ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ = None , snake_case__ = True , ):
"""simple docstring"""
lowerCAmelCase : Optional[Any] = timestep
if key is None:
lowerCAmelCase : Tuple = jax.random.PRNGKey(0 )
if model_output.shape[1] == sample.shape[1] * 2 and self.config.variance_type in ["learned", "learned_range"]:
lowerCAmelCase , lowerCAmelCase : Optional[Any] = jnp.split(snake_case__ , sample.shape[1] , axis=1 )
else:
lowerCAmelCase : Tuple = None
# 1. compute alphas, betas
lowerCAmelCase : Optional[int] = state.common.alphas_cumprod[t]
lowerCAmelCase : Optional[int] = jnp.where(t > 0 , state.common.alphas_cumprod[t - 1] , jnp.array(1.0 , dtype=self.dtype ) )
lowerCAmelCase : Dict = 1 - alpha_prod_t
lowerCAmelCase : Any = 1 - alpha_prod_t_prev
# 2. compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
if self.config.prediction_type == "epsilon":
lowerCAmelCase : Union[str, Any] = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
elif self.config.prediction_type == "sample":
lowerCAmelCase : List[Any] = model_output
elif self.config.prediction_type == "v_prediction":
lowerCAmelCase : Tuple = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output
else:
raise ValueError(
f"""prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample` """
" for the FlaxDDPMScheduler." )
# 3. Clip "predicted x_0"
if self.config.clip_sample:
lowerCAmelCase : Optional[int] = jnp.clip(snake_case__ , -1 , 1 )
# 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
lowerCAmelCase : List[Any] = (alpha_prod_t_prev ** 0.5 * state.common.betas[t]) / beta_prod_t
lowerCAmelCase : List[str] = state.common.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t
# 5. Compute predicted previous sample µ_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
lowerCAmelCase : int = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample
# 6. Add noise
def random_variance():
lowerCAmelCase : Tuple = jax.random.split(snake_case__ , num=1 )
lowerCAmelCase : str = jax.random.normal(snake_case__ , shape=model_output.shape , dtype=self.dtype )
return (self._get_variance(snake_case__ , snake_case__ , predicted_variance=snake_case__ ) ** 0.5) * noise
lowerCAmelCase : Union[str, Any] = jnp.where(t > 0 , random_variance() , jnp.zeros(model_output.shape , dtype=self.dtype ) )
lowerCAmelCase : Union[str, Any] = pred_prev_sample + variance
if not return_dict:
return (pred_prev_sample, state)
return FlaxDDPMSchedulerOutput(prev_sample=snake_case__ , state=snake_case__ )
def lowercase__ ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , ):
"""simple docstring"""
return add_noise_common(state.common , snake_case__ , snake_case__ , snake_case__ )
def lowercase__ ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , ):
"""simple docstring"""
return get_velocity_common(state.common , snake_case__ , snake_case__ , snake_case__ )
def __len__( self ):
"""simple docstring"""
return self.config.num_train_timesteps
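
# Worked numeric sketch (illustrative, plain numpy rather than jax) of the
# formula (7) coefficients computed in `step` above, for a toy linear beta
# schedule. Values are hypothetical; the real ones live in `state.common`.
import numpy as np

_betas = np.linspace(0.0001, 0.02, 1000)
_alphas = 1.0 - _betas
_alphas_cumprod = np.cumprod(_alphas)

_t = 10
_alpha_prod_t = _alphas_cumprod[_t]
_alpha_prod_t_prev = _alphas_cumprod[_t - 1]
_beta_prod_t = 1.0 - _alpha_prod_t
_beta_prod_t_prev = 1.0 - _alpha_prod_t_prev

# Coefficients of x_0 and x_t in the posterior mean (Ho et al. 2020, eq. 7):
_coeff_x0 = (_alpha_prod_t_prev ** 0.5 * _betas[_t]) / _beta_prod_t
_coeff_xt = _alphas[_t] ** 0.5 * _beta_prod_t_prev / _beta_prod_t
# pred_prev_sample = _coeff_x0 * pred_original_sample + _coeff_xt * sample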
| 681 |
"""simple docstring"""
def a__ ( SCREAMING_SNAKE_CASE : int = 1_0 , SCREAMING_SNAKE_CASE : int = 2_2 ):
'''simple docstring'''
lowerCAmelCase : Dict = range(1 , SCREAMING_SNAKE_CASE )
lowerCAmelCase : List[str] = range(1 , SCREAMING_SNAKE_CASE )
return sum(
1 for power in powers for base in bases if len(str(base**power ) ) == power )
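# Worked check (illustrative): 9**4 = 6561 has exactly 4 digits, so it counts,
# while 10**2 = 100 has 3 digits; bases of 10 or more can never qualify.
assert len(str(9**4)) == 4
assert len(str(10**2)) == 3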
if __name__ == "__main__":
print(F"{solution(10, 22) = }")
| 681 | 1 |
"""simple docstring"""
import itertools
import random
import unittest
import numpy as np
from transformers import BatchFeature, SpeechTaFeatureExtractor
from transformers.testing_utils import require_torch
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_torch_available():
import torch
lowerCAmelCase__ = random.Random()
def a__ ( SCREAMING_SNAKE_CASE : Dict , SCREAMING_SNAKE_CASE : Tuple=1.0 , SCREAMING_SNAKE_CASE : Optional[int]=None , SCREAMING_SNAKE_CASE : Any=None ):
'''simple docstring'''
if rng is None:
lowerCAmelCase : Tuple = global_rng
lowerCAmelCase : Dict = []
for batch_idx in range(shape[0] ):
values.append([] )
for _ in range(shape[1] ):
values[-1].append(rng.random() * scale )
return values
@require_torch
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
"""simple docstring"""
def __init__( self , snake_case__ , snake_case__=7 , snake_case__=400 , snake_case__=2_000 , snake_case__=1 , snake_case__=0.0 , snake_case__=16_000 , snake_case__=True , snake_case__=80 , snake_case__=16 , snake_case__=64 , snake_case__="hann_window" , snake_case__=80 , snake_case__=7_600 , snake_case__=1e-10 , snake_case__=True , ):
"""simple docstring"""
lowerCAmelCase : Optional[Any] = parent
lowerCAmelCase : List[str] = batch_size
lowerCAmelCase : List[Any] = min_seq_length
lowerCAmelCase : Any = max_seq_length
lowerCAmelCase : Optional[int] = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
lowerCAmelCase : Tuple = feature_size
lowerCAmelCase : Tuple = padding_value
lowerCAmelCase : Dict = sampling_rate
lowerCAmelCase : Optional[Any] = do_normalize
lowerCAmelCase : Optional[Any] = num_mel_bins
lowerCAmelCase : Any = hop_length
lowerCAmelCase : Tuple = win_length
lowerCAmelCase : Optional[int] = win_function
lowerCAmelCase : Optional[int] = fmin
lowerCAmelCase : Tuple = fmax
lowerCAmelCase : List[Any] = mel_floor
lowerCAmelCase : int = return_attention_mask
def lowercase__ ( self ):
"""simple docstring"""
return {
"feature_size": self.feature_size,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"do_normalize": self.do_normalize,
"num_mel_bins": self.num_mel_bins,
"hop_length": self.hop_length,
"win_length": self.win_length,
"win_function": self.win_function,
"fmin": self.fmin,
"fmax": self.fmax,
"mel_floor": self.mel_floor,
"return_attention_mask": self.return_attention_mask,
}
def lowercase__ ( self , snake_case__=False , snake_case__=False ):
"""simple docstring"""
def _flatten(snake_case__ ):
return list(itertools.chain(*snake_case__ ) )
if equal_length:
lowerCAmelCase : Any = floats_list((self.batch_size, self.max_seq_length) )
else:
# make sure that inputs increase in size
lowerCAmelCase : List[str] = [
_flatten(floats_list((x, self.feature_size) ) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
lowerCAmelCase : List[Any] = [np.asarray(snake_case__ ) for x in speech_inputs]
return speech_inputs
def lowercase__ ( self , snake_case__=False , snake_case__=False ):
"""simple docstring"""
if equal_length:
lowerCAmelCase : Any = [floats_list((self.max_seq_length, self.num_mel_bins) ) for _ in range(self.batch_size )]
else:
# make sure that inputs increase in size
lowerCAmelCase : List[Any] = [
floats_list((x, self.num_mel_bins) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
lowerCAmelCase : List[str] = [np.asarray(snake_case__ ) for x in speech_inputs]
return speech_inputs
@require_torch
class SCREAMING_SNAKE_CASE__ ( lowercase , unittest.TestCase ):
"""simple docstring"""
a : Dict =SpeechTaFeatureExtractor
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : Union[str, Any] = SpeechTaFeatureExtractionTester(self )
def lowercase__ ( self , snake_case__ ):
"""simple docstring"""
self.assertTrue(np.all(np.mean(snake_case__ , axis=0 ) < 1e-3 ) )
self.assertTrue(np.all(np.abs(np.var(snake_case__ , axis=0 ) - 1 ) < 1e-3 ) )
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : Dict = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
lowerCAmelCase : int = [floats_list((1, x) )[0] for x in range(800 , 1_400 , 200 )]
lowerCAmelCase : int = [np.asarray(snake_case__ ) for speech_input in speech_inputs]
# Test not batched input
lowerCAmelCase : str = feat_extract(speech_inputs[0] , return_tensors="np" ).input_values
lowerCAmelCase : Tuple = feat_extract(np_speech_inputs[0] , return_tensors="np" ).input_values
self.assertTrue(np.allclose(snake_case__ , snake_case__ , atol=1e-3 ) )
# Test batched
lowerCAmelCase : Optional[Any] = feat_extract(snake_case__ , return_tensors="np" ).input_values
lowerCAmelCase : Optional[Any] = feat_extract(snake_case__ , return_tensors="np" ).input_values
for enc_seq_a, enc_seq_a in zip(snake_case__ , snake_case__ ):
self.assertTrue(np.allclose(snake_case__ , snake_case__ , atol=1e-3 ) )
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : List[str] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
lowerCAmelCase : Tuple = [floats_list((1, x) )[0] for x in range(800 , 1_400 , 200 )]
lowerCAmelCase : Tuple = ["longest", "max_length", "do_not_pad"]
lowerCAmelCase : Tuple = [None, 1_600, None]
for max_length, padding in zip(snake_case__ , snake_case__ ):
lowerCAmelCase : Optional[Any] = feat_extract(snake_case__ , padding=snake_case__ , max_length=snake_case__ , return_tensors="np" )
lowerCAmelCase : Optional[int] = processed.input_values
self._check_zero_mean_unit_variance(input_values[0][:800] )
self.assertTrue(input_values[0][800:].sum() < 1e-6 )
self._check_zero_mean_unit_variance(input_values[1][:1_000] )
self.assertTrue(input_values[1][1_000:].sum() < 1e-6 )
self._check_zero_mean_unit_variance(input_values[2][:1_200] )
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : List[Any] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
lowerCAmelCase : int = range(800 , 1_400 , 200 )
lowerCAmelCase : List[str] = [floats_list((1, x) )[0] for x in lengths]
lowerCAmelCase : Optional[Any] = ["longest", "max_length", "do_not_pad"]
lowerCAmelCase : int = [None, 1_600, None]
for max_length, padding in zip(snake_case__ , snake_case__ ):
lowerCAmelCase : Dict = feat_extract(snake_case__ , max_length=snake_case__ , padding=snake_case__ )
lowerCAmelCase : Dict = processed.input_values
self._check_zero_mean_unit_variance(input_values[0][:800] )
self._check_zero_mean_unit_variance(input_values[1][:1_000] )
self._check_zero_mean_unit_variance(input_values[2][:1_200] )
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : List[str] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
lowerCAmelCase : str = [floats_list((1, x) )[0] for x in range(800 , 1_400 , 200 )]
lowerCAmelCase : Union[str, Any] = feat_extract(
snake_case__ , truncation=snake_case__ , max_length=1_000 , padding="max_length" , return_tensors="np" )
lowerCAmelCase : int = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :800] )
self._check_zero_mean_unit_variance(input_values[1] )
self._check_zero_mean_unit_variance(input_values[2] )
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : List[str] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
lowerCAmelCase : List[str] = [floats_list((1, x) )[0] for x in range(800 , 1_400 , 200 )]
lowerCAmelCase : List[Any] = feat_extract(
snake_case__ , truncation=snake_case__ , max_length=1_000 , padding="longest" , return_tensors="np" )
lowerCAmelCase : List[str] = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :800] )
self._check_zero_mean_unit_variance(input_values[1, :1_000] )
self._check_zero_mean_unit_variance(input_values[2] )
# make sure that if max_length < longest -> then pad to max_length
self.assertTrue(input_values.shape == (3, 1_000) )
lowerCAmelCase : Tuple = [floats_list((1, x) )[0] for x in range(800 , 1_400 , 200 )]
lowerCAmelCase : Tuple = feat_extract(
snake_case__ , truncation=snake_case__ , max_length=2_000 , padding="longest" , return_tensors="np" )
lowerCAmelCase : List[Any] = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :800] )
self._check_zero_mean_unit_variance(input_values[1, :1_000] )
self._check_zero_mean_unit_variance(input_values[2] )
# make sure that if max_length > longest -> then pad to longest
self.assertTrue(input_values.shape == (3, 1_200) )
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : str = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
lowerCAmelCase : Union[str, Any] = np.random.rand(100 ).astype(np.floataa )
lowerCAmelCase : List[Any] = np_speech_inputs.tolist()
for inputs in [py_speech_inputs, np_speech_inputs]:
lowerCAmelCase : Dict = feature_extractor.pad([{"input_values": inputs}] , return_tensors="np" )
self.assertTrue(np_processed.input_values.dtype == np.floataa )
lowerCAmelCase : List[str] = feature_extractor.pad([{"input_values": inputs}] , return_tensors="pt" )
self.assertTrue(pt_processed.input_values.dtype == torch.floataa )
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : List[str] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
lowerCAmelCase : List[str] = [floats_list((1, x) )[0] for x in range(800 , 1_400 , 200 )]
lowerCAmelCase : Dict = [np.asarray(snake_case__ ) for speech_input in speech_inputs]
# Test feature size
lowerCAmelCase : int = feature_extractor(audio_target=snake_case__ , padding=snake_case__ , return_tensors="np" ).input_values
self.assertTrue(input_values.ndim == 3 )
self.assertTrue(input_values.shape[-1] == feature_extractor.num_mel_bins )
# Test not batched input
lowerCAmelCase : List[str] = feature_extractor(speech_inputs[0] , return_tensors="np" ).input_values
lowerCAmelCase : Optional[int] = feature_extractor(np_speech_inputs[0] , return_tensors="np" ).input_values
self.assertTrue(np.allclose(snake_case__ , snake_case__ , atol=1e-3 ) )
# Test batched
lowerCAmelCase : Any = feature_extractor(snake_case__ , return_tensors="np" ).input_values
lowerCAmelCase : List[Any] = feature_extractor(snake_case__ , return_tensors="np" ).input_values
for enc_seq_a, enc_seq_a in zip(snake_case__ , snake_case__ ):
self.assertTrue(np.allclose(snake_case__ , snake_case__ , atol=1e-3 ) )
# Test 2-D numpy arrays are batched.
lowerCAmelCase : int = [floats_list((1, x) )[0] for x in (800, 800, 800)]
lowerCAmelCase : str = np.asarray(snake_case__ )
lowerCAmelCase : Dict = feature_extractor(snake_case__ , return_tensors="np" ).input_values
lowerCAmelCase : Tuple = feature_extractor(snake_case__ , return_tensors="np" ).input_values
for enc_seq_a, enc_seq_a in zip(snake_case__ , snake_case__ ):
self.assertTrue(np.allclose(snake_case__ , snake_case__ , atol=1e-3 ) )
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : Union[str, Any] = self.feat_extract_tester.prepare_inputs_for_target()
lowerCAmelCase : Tuple = self.feature_extraction_class(**self.feat_extract_dict )
lowerCAmelCase : Dict = feat_extract.model_input_names[0]
lowerCAmelCase : Optional[int] = BatchFeature({input_name: speech_inputs} )
self.assertTrue(all(len(snake_case__ ) == len(snake_case__ ) for x, y in zip(snake_case__ , processed_features[input_name] ) ) )
lowerCAmelCase : int = self.feat_extract_tester.prepare_inputs_for_target(equal_length=snake_case__ )
lowerCAmelCase : List[Any] = BatchFeature({input_name: speech_inputs} , tensor_type="np" )
lowerCAmelCase : int = processed_features[input_name]
if len(batch_features_input.shape ) < 3:
lowerCAmelCase : Optional[int] = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.num_mel_bins) )
@require_torch
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : Tuple = self.feat_extract_tester.prepare_inputs_for_target(equal_length=snake_case__ )
lowerCAmelCase : Union[str, Any] = self.feature_extraction_class(**self.feat_extract_dict )
lowerCAmelCase : Union[str, Any] = feat_extract.model_input_names[0]
lowerCAmelCase : str = BatchFeature({input_name: speech_inputs} , tensor_type="pt" )
lowerCAmelCase : List[Any] = processed_features[input_name]
if len(batch_features_input.shape ) < 3:
lowerCAmelCase : Optional[Any] = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.num_mel_bins) )
@require_torch
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : Dict = self.feature_extraction_class(**self.feat_extract_dict )
lowerCAmelCase : List[str] = self.feat_extract_tester.prepare_inputs_for_target()
lowerCAmelCase : Optional[int] = feat_extract.model_input_names[0]
lowerCAmelCase : Union[str, Any] = BatchFeature({input_name: speech_inputs} )
lowerCAmelCase : Any = feat_extract.num_mel_bins # hack!
lowerCAmelCase : Optional[Any] = feat_extract.pad(snake_case__ , padding="longest" , return_tensors="np" )[input_name]
lowerCAmelCase : List[Any] = feat_extract.pad(snake_case__ , padding="longest" , return_tensors="pt" )[input_name]
self.assertTrue(abs(input_np.astype(np.floataa ).sum() - input_pt.numpy().astype(np.floataa ).sum() ) < 1e-2 )
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : str = self.feat_extract_dict
lowerCAmelCase : str = True
lowerCAmelCase : Union[str, Any] = self.feature_extraction_class(**snake_case__ )
lowerCAmelCase : Optional[Any] = self.feat_extract_tester.prepare_inputs_for_target()
lowerCAmelCase : List[str] = [len(snake_case__ ) for x in speech_inputs]
lowerCAmelCase : Union[str, Any] = feat_extract.model_input_names[0]
lowerCAmelCase : Optional[int] = BatchFeature({input_name: speech_inputs} )
lowerCAmelCase : Optional[int] = feat_extract.num_mel_bins # hack!
lowerCAmelCase : Optional[int] = feat_extract.pad(snake_case__ , padding="longest" , return_tensors="np" )
self.assertIn("attention_mask" , snake_case__ )
self.assertListEqual(list(processed.attention_mask.shape ) , list(processed[input_name].shape[:2] ) )
self.assertListEqual(processed.attention_mask.sum(-1 ).tolist() , snake_case__ )
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : Any = self.feat_extract_dict
lowerCAmelCase : List[Any] = True
lowerCAmelCase : int = self.feature_extraction_class(**snake_case__ )
lowerCAmelCase : Optional[int] = self.feat_extract_tester.prepare_inputs_for_target()
lowerCAmelCase : Optional[int] = [len(snake_case__ ) for x in speech_inputs]
lowerCAmelCase : List[Any] = feat_extract.model_input_names[0]
lowerCAmelCase : List[Any] = BatchFeature({input_name: speech_inputs} )
lowerCAmelCase : int = min(snake_case__ )
lowerCAmelCase : Optional[Any] = feat_extract.num_mel_bins # hack!
lowerCAmelCase : List[Any] = feat_extract.pad(
snake_case__ , padding="max_length" , max_length=snake_case__ , truncation=snake_case__ , return_tensors="np" )
self.assertIn("attention_mask" , snake_case__ )
self.assertListEqual(
list(processed_pad.attention_mask.shape ) , [processed_pad[input_name].shape[0], max_length] )
self.assertListEqual(
processed_pad.attention_mask[:, :max_length].sum(-1 ).tolist() , [max_length for x in speech_inputs] )
def lowercase__ ( self , snake_case__ ):
"""simple docstring"""
from datasets import load_dataset
lowerCAmelCase : List[str] = load_dataset("hf-internal-testing/librispeech_asr_dummy" , "clean" , split="validation" )
# automatic decoding with librispeech
lowerCAmelCase : str = ds.sort("id" ).select(range(snake_case__ ) )[:num_samples]["audio"]
return [x["array"] for x in speech_samples]
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : Tuple = torch.tensor(
[2.38_04e-03, 2.07_52e-03, 1.98_36e-03, 2.10_57e-03, 1.61_74e-03,
3.05_18e-04, 9.15_53e-05, 3.35_69e-04, 9.76_56e-04, 1.83_11e-03,
2.01_42e-03, 2.10_57e-03, 1.73_95e-03, 4.57_76e-04, -3.96_73e-04,
4.57_76e-04, 1.00_71e-03, 9.15_53e-05, 4.88_28e-04, 1.15_97e-03,
7.32_42e-04, 9.46_04e-04, 1.80_05e-03, 1.83_11e-03, 8.85_01e-04,
4.27_25e-04, 4.88_28e-04, 7.32_42e-04, 1.09_86e-03, 2.10_57e-03] )
# fmt: on
lowerCAmelCase : Tuple = self._load_datasamples(1 )
lowerCAmelCase : int = SpeechTaFeatureExtractor()
lowerCAmelCase : Any = feature_extractor(snake_case__ , return_tensors="pt" ).input_values
self.assertEquals(input_values.shape , (1, 93_680) )
self.assertTrue(torch.allclose(input_values[0, :30] , snake_case__ , atol=1e-6 ) )
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : Optional[int] = torch.tensor(
[-2.6870, -3.0104, -3.1356, -3.5352, -3.0044, -3.0353, -3.4719, -3.6777,
-3.1520, -2.9435, -2.6553, -2.8795, -2.9944, -2.5921, -3.0279, -3.0386,
-3.0864, -3.1291, -3.2353, -2.7444, -2.6831, -2.7287, -3.1761, -3.1571,
-3.2726, -3.0582, -3.1007, -3.4533, -3.4695, -3.0998] )
# fmt: on
lowerCAmelCase : Optional[Any] = self._load_datasamples(1 )
lowerCAmelCase : List[Any] = SpeechTaFeatureExtractor()
lowerCAmelCase : Optional[Any] = feature_extractor(audio_target=snake_case__ , return_tensors="pt" ).input_values
self.assertEquals(input_values.shape , (1, 366, 80) )
self.assertTrue(torch.allclose(input_values[0, 0, :30] , snake_case__ , atol=1e-4 ) )
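
# Minimal standalone sketch (illustrative) of the zero-mean / unit-variance
# property that `_check_zero_mean_unit_variance` asserts above:
def _zmuv_sketch(x):
    x = np.asarray(x, dtype=np.float64)
    return (x - x.mean()) / np.sqrt(x.var() + 1e-7)

assert abs(_zmuv_sketch(range(100)).mean()) < 1e-3
assert abs(_zmuv_sketch(range(100)).var() - 1.0) < 1e-3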
| 681 |
"""simple docstring"""
def a__ ( SCREAMING_SNAKE_CASE : List[str] ):
'''simple docstring'''
lowerCAmelCase : Optional[int] = len(SCREAMING_SNAKE_CASE )
while cur > 1:
# Find the maximum number in arr
lowerCAmelCase : List[str] = arr.index(max(arr[0:cur] ) )
# Reverse from 0 to mi
lowerCAmelCase : str = arr[mi::-1] + arr[mi + 1 : len(SCREAMING_SNAKE_CASE )]
# Reverse whole list
lowerCAmelCase : str = arr[cur - 1 :: -1] + arr[cur : len(SCREAMING_SNAKE_CASE )]
cur -= 1
return arr
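# Illustrative trace for [3, 1, 2] (comments only):
#   cur=3: max 3 at index 0; flip prefix [0..0] -> [3, 1, 2]; flip [0..2] -> [2, 1, 3]
#   cur=2: max 2 at index 0; flip prefix [0..0] -> [2, 1, 3]; flip [0..1] -> [1, 2, 3]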
if __name__ == "__main__":
lowerCAmelCase__ = input('''Enter numbers separated by a comma:\n''').strip()
lowerCAmelCase__ = [int(item) for item in user_input.split(''',''')]
print(pancake_sort(unsorted))
| 681 | 1 |
"""simple docstring"""
from __future__ import annotations
lowerCAmelCase__ = list[tuple[int, int]]
lowerCAmelCase__ = [
[0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0],
[1, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0],
]
lowerCAmelCase__ = ([-1, 0], [0, -1], [1, 0], [0, 1]) # up, left, down, right
class SCREAMING_SNAKE_CASE__ :
"""simple docstring"""
def __init__( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , ):
"""simple docstring"""
lowerCAmelCase : List[Any] = pos_x
lowerCAmelCase : int = pos_y
lowerCAmelCase : Tuple = (pos_y, pos_x)
lowerCAmelCase : List[str] = goal_x
lowerCAmelCase : Tuple = goal_y
lowerCAmelCase : str = g_cost
lowerCAmelCase : Optional[Any] = parent
lowerCAmelCase : List[Any] = self.calculate_heuristic()
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : int = abs(self.pos_x - self.goal_x )
lowerCAmelCase : Any = abs(self.pos_y - self.goal_y )
return dx + dy
def __lt__( self , snake_case__ ):
"""simple docstring"""
return self.f_cost < other.f_cost
class SCREAMING_SNAKE_CASE__ :
"""simple docstring"""
def __init__( self , snake_case__ , snake_case__ ):
"""simple docstring"""
lowerCAmelCase : Any = Node(start[1] , start[0] , goal[1] , goal[0] , 0 , snake_case__ )
lowerCAmelCase : Any = Node(goal[1] , goal[0] , goal[1] , goal[0] , 99_999 , snake_case__ )
lowerCAmelCase : Any = [self.start]
lowerCAmelCase : list[Node] = []
lowerCAmelCase : Dict = False
def lowercase__ ( self ):
"""simple docstring"""
while self.open_nodes:
# Open Nodes are sorted using __lt__
self.open_nodes.sort()
lowerCAmelCase : Dict = self.open_nodes.pop(0 )
if current_node.pos == self.target.pos:
lowerCAmelCase : Dict = True
return self.retrace_path(snake_case__ )
self.closed_nodes.append(snake_case__ )
lowerCAmelCase : str = self.get_successors(snake_case__ )
for child_node in successors:
if child_node in self.closed_nodes:
continue
if child_node not in self.open_nodes:
self.open_nodes.append(snake_case__ )
else:
# retrieve the best current path
lowerCAmelCase : Union[str, Any] = self.open_nodes.pop(self.open_nodes.index(snake_case__ ) )
if child_node.g_cost < better_node.g_cost:
self.open_nodes.append(snake_case__ )
else:
self.open_nodes.append(snake_case__ )
if not self.reached:
return [self.start.pos]
return None
def lowercase__ ( self , snake_case__ ):
"""simple docstring"""
lowerCAmelCase : Any = []
for action in delta:
lowerCAmelCase : Optional[int] = parent.pos_x + action[1]
lowerCAmelCase : int = parent.pos_y + action[0]
if not (0 <= pos_x <= len(grid[0] ) - 1 and 0 <= pos_y <= len(snake_case__ ) - 1):
continue
if grid[pos_y][pos_x] != 0:
continue
successors.append(
Node(
snake_case__ , snake_case__ , self.target.pos_y , self.target.pos_x , parent.g_cost + 1 , snake_case__ , ) )
return successors
def lowercase__ ( self , snake_case__ ):
"""simple docstring"""
lowerCAmelCase : List[Any] = node
lowerCAmelCase : Dict = []
while current_node is not None:
path.append((current_node.pos_y, current_node.pos_x) )
lowerCAmelCase : int = current_node.parent
path.reverse()
return path
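
# Minimal sketch (illustrative) of the Manhattan-distance heuristic used by
# `calculate_heuristic` above:
def _manhattan_sketch(pos, goal):
    return abs(pos[0] - goal[0]) + abs(pos[1] - goal[1])

assert _manhattan_sketch((0, 0), (6, 6)) == 12  # start/goal of the 7x7 grid above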
if __name__ == "__main__":
lowerCAmelCase__ = (0, 0)
lowerCAmelCase__ = (len(grid) - 1, len(grid[0]) - 1)
for elem in grid:
print(elem)
print('''------''')
lowerCAmelCase__ = GreedyBestFirst(init, goal)
lowerCAmelCase__ = greedy_bf.search()
if path:
for pos_x, pos_y in path:
lowerCAmelCase__ = 2
for elem in grid:
print(elem)
| 681 |
"""simple docstring"""
from ..utils import is_flax_available, is_torch_available
if is_torch_available():
from .autoencoder_kl import AutoencoderKL
from .controlnet import ControlNetModel
from .dual_transformer_ad import DualTransformeraDModel
from .modeling_utils import ModelMixin
from .prior_transformer import PriorTransformer
from .ta_film_transformer import TaFilmDecoder
from .transformer_ad import TransformeraDModel
from .unet_ad import UNetaDModel
from .unet_ad_condition import UNetaDConditionModel
from .vq_model import VQModel
if is_flax_available():
from .controlnet_flax import FlaxControlNetModel
from .unet_ad_condition_flax import FlaxUNetaDConditionModel
from .vae_flax import FlaxAutoencoderKL
| 681 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowerCAmelCase__ = {
'''configuration_pegasus_x''': ['''PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''PegasusXConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = [
'''PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''PegasusXForConditionalGeneration''',
'''PegasusXModel''',
'''PegasusXPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_pegasus_x import PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP, PegasusXConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_pegasus_x import (
PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST,
PegasusXForConditionalGeneration,
PegasusXModel,
PegasusXPreTrainedModel,
)
else:
import sys
lowerCAmelCase__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 681 |
"""simple docstring"""
import math
def a__ ( SCREAMING_SNAKE_CASE : int ):
'''simple docstring'''
return math.sqrt(SCREAMING_SNAKE_CASE ) * math.sqrt(SCREAMING_SNAKE_CASE ) == SCREAMING_SNAKE_CASE
def a__ ( SCREAMING_SNAKE_CASE : int ):
'''simple docstring'''
lowerCAmelCase : Dict = 0
lowerCAmelCase : List[str] = n
while left <= right:
lowerCAmelCase : str = (left + right) // 2
if mid**2 == n:
return True
elif mid**2 > n:
lowerCAmelCase : int = mid - 1
else:
lowerCAmelCase : int = mid + 1
return False
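
# Self-contained sketch of the same integer binary search, with a descriptive
# (hypothetical) name since both helpers above share the mangled name `a__`:
def _is_perfect_square_sketch(n: int) -> bool:
    lo, hi = 0, n
    while lo <= hi:
        mid = (lo + hi) // 2
        if mid * mid == n:
            return True
        if mid * mid > n:
            hi = mid - 1
        else:
            lo = mid + 1
    return False

assert _is_perfect_square_sketch(25) and not _is_perfect_square_sketch(26)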
if __name__ == "__main__":
import doctest
doctest.testmod()
| 681 | 1 |
"""simple docstring"""
from itertools import product
from cva import COLOR_BGR2GRAY, cvtColor, imread, imshow, waitKey
from numpy import dot, exp, mgrid, pi, ravel, square, uinta, zeros
def a__ ( SCREAMING_SNAKE_CASE : Optional[int] , SCREAMING_SNAKE_CASE : str ):
'''simple docstring'''
lowerCAmelCase : Union[str, Any] = k_size // 2
lowerCAmelCase , lowerCAmelCase : Optional[int] = mgrid[0 - center : k_size - center, 0 - center : k_size - center]
lowerCAmelCase : Optional[int] = 1 / (2 * pi * sigma) * exp(-(square(SCREAMING_SNAKE_CASE ) + square(SCREAMING_SNAKE_CASE )) / (2 * square(SCREAMING_SNAKE_CASE )) )
return g
def a__ ( SCREAMING_SNAKE_CASE : Tuple , SCREAMING_SNAKE_CASE : Dict , SCREAMING_SNAKE_CASE : int ):
'''simple docstring'''
lowerCAmelCase , lowerCAmelCase : str = image.shape[0], image.shape[1]
# dst image height and width
lowerCAmelCase : List[str] = height - k_size + 1
lowerCAmelCase : List[Any] = width - k_size + 1
# im2col, turn the k_size*k_size pixels into a row and np.vstack all rows
lowerCAmelCase : str = zeros((dst_height * dst_width, k_size * k_size) )
lowerCAmelCase : Dict = 0
for i, j in product(range(SCREAMING_SNAKE_CASE ) , range(SCREAMING_SNAKE_CASE ) ):
lowerCAmelCase : Dict = ravel(image[i : i + k_size, j : j + k_size] )
lowerCAmelCase : Union[str, Any] = window
row += 1
# turn the kernel into shape(k*k, 1)
lowerCAmelCase : List[Any] = gen_gaussian_kernel(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
lowerCAmelCase : List[Any] = ravel(SCREAMING_SNAKE_CASE )
# reshape and get the dst image
lowerCAmelCase : Optional[int] = dot(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ).reshape(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ).astype(SCREAMING_SNAKE_CASE )
return dst
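
# Shape check (illustrative) for the im2col step above: an H x W image with a
# k x k kernel yields (H - k + 1) * (W - k + 1) windows of k * k pixels each.
_H, _W, _k = 8, 8, 3
assert ((_H - _k + 1) * (_W - _k + 1), _k * _k) == (36, 9)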
if __name__ == "__main__":
# read original image
lowerCAmelCase__ = imread(r'''../image_data/lena.jpg''')
# turn image in gray scale value
lowerCAmelCase__ = cvtColor(img, COLOR_BGR2GRAY)
# get values with two different mask size
lowerCAmelCase__ = gaussian_filter(gray, 3, sigma=1)
lowerCAmelCase__ = gaussian_filter(gray, 5, sigma=0.8)
# show result images
imshow('''gaussian filter with 3x3 mask''', gaussianaxa)
imshow('''gaussian filter with 5x5 mask''', gaussianaxa)
waitKey()
| 681 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {
'''google/vit-base-patch16-224''': '''https://huggingface.co/vit-base-patch16-224/resolve/main/config.json''',
# See all ViT models at https://huggingface.co/models?filter=vit
}
class SCREAMING_SNAKE_CASE__ ( lowercase ):
"""simple docstring"""
a : Union[str, Any] ="vit"
def __init__( self , snake_case__=768 , snake_case__=12 , snake_case__=12 , snake_case__=3_072 , snake_case__="gelu" , snake_case__=0.0 , snake_case__=0.0 , snake_case__=0.02 , snake_case__=1e-12 , snake_case__=224 , snake_case__=16 , snake_case__=3 , snake_case__=True , snake_case__=16 , **snake_case__ , ):
"""simple docstring"""
super().__init__(**snake_case__ )
lowerCAmelCase : Optional[Any] = hidden_size
lowerCAmelCase : List[Any] = num_hidden_layers
lowerCAmelCase : List[Any] = num_attention_heads
lowerCAmelCase : Union[str, Any] = intermediate_size
lowerCAmelCase : Any = hidden_act
lowerCAmelCase : Optional[Any] = hidden_dropout_prob
lowerCAmelCase : Optional[Any] = attention_probs_dropout_prob
lowerCAmelCase : List[str] = initializer_range
lowerCAmelCase : Optional[Any] = layer_norm_eps
lowerCAmelCase : Optional[int] = image_size
lowerCAmelCase : Optional[Any] = patch_size
lowerCAmelCase : Tuple = num_channels
lowerCAmelCase : Optional[int] = qkv_bias
lowerCAmelCase : str = encoder_stride
class SCREAMING_SNAKE_CASE__ ( lowercase ):
"""simple docstring"""
a : List[Any] =version.parse("1.11" )
@property
def lowercase__ ( self ):
"""simple docstring"""
return OrderedDict(
[
("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
] )
@property
def lowercase__ ( self ):
"""simple docstring"""
return 1e-4
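
# Worked relation (illustrative): with the defaults above (image_size=224,
# patch_size=16) a ViT input is split into (224 // 16) ** 2 = 196 patches,
# i.e. 197 tokens once the [CLS] token is prepended.
assert (224 // 16) ** 2 + 1 == 197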
| 681 | 1 |
"""simple docstring"""
lowerCAmelCase__ = [
(1_000, '''M'''),
(900, '''CM'''),
(500, '''D'''),
(400, '''CD'''),
(100, '''C'''),
(90, '''XC'''),
(50, '''L'''),
(40, '''XL'''),
(10, '''X'''),
(9, '''IX'''),
(5, '''V'''),
(4, '''IV'''),
(1, '''I'''),
]
def a__ ( SCREAMING_SNAKE_CASE : str ):
'''simple docstring'''
lowerCAmelCase : List[str] = {"I": 1, "V": 5, "X": 1_0, "L": 5_0, "C": 1_0_0, "D": 5_0_0, "M": 1_0_0_0}
lowerCAmelCase : str = 0
lowerCAmelCase : Optional[Any] = 0
while place < len(SCREAMING_SNAKE_CASE ):
if (place + 1 < len(SCREAMING_SNAKE_CASE )) and (vals[roman[place]] < vals[roman[place + 1]]):
total += vals[roman[place + 1]] - vals[roman[place]]
place += 2
else:
total += vals[roman[place]]
place += 1
return total
def a__ ( SCREAMING_SNAKE_CASE : int ):
'''simple docstring'''
lowerCAmelCase : str = []
for arabic, roman in ROMAN:
((lowerCAmelCase) , (lowerCAmelCase)) : int = divmod(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
result.append(roman * factor )
if number == 0:
break
return "".join(SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 681 |
"""simple docstring"""
from .glue import glue_convert_examples_to_features, glue_output_modes, glue_processors, glue_tasks_num_labels
from .squad import SquadExample, SquadFeatures, SquadVaProcessor, SquadVaProcessor, squad_convert_examples_to_features
from .utils import DataProcessor, InputExample, InputFeatures, SingleSentenceClassificationProcessor
from .xnli import xnli_output_modes, xnli_processors, xnli_tasks_num_labels
| 681 | 1 |
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
lowerCAmelCase__ = logging.get_logger(__name__)
if is_vision_available():
import PIL
class SCREAMING_SNAKE_CASE__ ( lowercase ):
"""simple docstring"""
a : Dict =["pixel_values"]
def __init__( self , snake_case__ = True , snake_case__ = None , snake_case__ = PILImageResampling.BICUBIC , snake_case__ = True , snake_case__ = None , snake_case__ = True , snake_case__ = 1 / 255 , snake_case__ = True , snake_case__ = None , snake_case__ = None , snake_case__ = True , **snake_case__ , ):
"""simple docstring"""
super().__init__(**snake_case__ )
lowerCAmelCase : Dict = size if size is not None else {"shortest_edge": 224}
lowerCAmelCase : Optional[int] = get_size_dict(snake_case__ , default_to_square=snake_case__ )
lowerCAmelCase : Union[str, Any] = crop_size if crop_size is not None else {"height": 224, "width": 224}
lowerCAmelCase : Union[str, Any] = get_size_dict(snake_case__ , default_to_square=snake_case__ , param_name="crop_size" )
lowerCAmelCase : int = do_resize
lowerCAmelCase : int = size
lowerCAmelCase : Tuple = resample
lowerCAmelCase : int = do_center_crop
lowerCAmelCase : Optional[Any] = crop_size
lowerCAmelCase : Tuple = do_rescale
lowerCAmelCase : int = rescale_factor
lowerCAmelCase : str = do_normalize
lowerCAmelCase : int = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
lowerCAmelCase : str = image_std if image_std is not None else OPENAI_CLIP_STD
lowerCAmelCase : Optional[int] = do_convert_rgb
def lowercase__ ( self , snake_case__ , snake_case__ , snake_case__ = PILImageResampling.BICUBIC , snake_case__ = None , **snake_case__ , ):
"""simple docstring"""
lowerCAmelCase : Optional[Any] = get_size_dict(snake_case__ , default_to_square=snake_case__ )
if "shortest_edge" not in size:
raise ValueError(f"""The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}""" )
lowerCAmelCase : List[Any] = get_resize_output_image_size(snake_case__ , size=size["shortest_edge"] , default_to_square=snake_case__ )
return resize(snake_case__ , size=snake_case__ , resample=snake_case__ , data_format=snake_case__ , **snake_case__ )
def lowercase__ ( self , snake_case__ , snake_case__ , snake_case__ = None , **snake_case__ , ):
"""simple docstring"""
lowerCAmelCase : Any = get_size_dict(snake_case__ )
if "height" not in size or "width" not in size:
raise ValueError(f"""The `size` parameter must contain the keys (height, width). Got {size.keys()}""" )
return center_crop(snake_case__ , size=(size["height"], size["width"]) , data_format=snake_case__ , **snake_case__ )
def lowercase__ ( self , snake_case__ , snake_case__ , snake_case__ = None , **snake_case__ , ):
"""simple docstring"""
return rescale(snake_case__ , scale=snake_case__ , data_format=snake_case__ , **snake_case__ )
def lowercase__ ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ = None , **snake_case__ , ):
"""simple docstring"""
return normalize(snake_case__ , mean=snake_case__ , std=snake_case__ , data_format=snake_case__ , **snake_case__ )
def lowercase__ ( self , snake_case__ , snake_case__ = None , snake_case__ = None , snake_case__ = None , snake_case__ = None , snake_case__ = None , snake_case__ = None , snake_case__ = None , snake_case__ = None , snake_case__ = None , snake_case__ = None , snake_case__ = None , snake_case__ = None , snake_case__ = ChannelDimension.FIRST , **snake_case__ , ):
"""simple docstring"""
lowerCAmelCase : Dict = do_resize if do_resize is not None else self.do_resize
lowerCAmelCase : Optional[Any] = size if size is not None else self.size
lowerCAmelCase : List[Any] = get_size_dict(snake_case__ , param_name="size" , default_to_square=snake_case__ )
lowerCAmelCase : int = resample if resample is not None else self.resample
lowerCAmelCase : Tuple = do_center_crop if do_center_crop is not None else self.do_center_crop
lowerCAmelCase : Optional[Any] = crop_size if crop_size is not None else self.crop_size
lowerCAmelCase : Optional[Any] = get_size_dict(snake_case__ , param_name="crop_size" , default_to_square=snake_case__ )
lowerCAmelCase : Any = do_rescale if do_rescale is not None else self.do_rescale
lowerCAmelCase : str = rescale_factor if rescale_factor is not None else self.rescale_factor
lowerCAmelCase : Optional[Any] = do_normalize if do_normalize is not None else self.do_normalize
lowerCAmelCase : int = image_mean if image_mean is not None else self.image_mean
lowerCAmelCase : Optional[int] = image_std if image_std is not None else self.image_std
lowerCAmelCase : Union[str, Any] = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
lowerCAmelCase : Union[str, Any] = make_list_of_images(snake_case__ )
if not valid_images(snake_case__ ):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray." )
if do_resize and size is None:
raise ValueError("Size must be specified if do_resize is True." )
if do_center_crop and crop_size is None:
raise ValueError("Crop size must be specified if do_center_crop is True." )
if do_rescale and rescale_factor is None:
raise ValueError("Rescale factor must be specified if do_rescale is True." )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("Image mean and std must be specified if do_normalize is True." )
# PIL RGBA images are converted to RGB
if do_convert_rgb:
lowerCAmelCase : List[str] = [convert_to_rgb(snake_case__ ) for image in images]
# All transformations expect numpy arrays.
lowerCAmelCase : List[str] = [to_numpy_array(snake_case__ ) for image in images]
if do_resize:
lowerCAmelCase : int = [self.resize(image=snake_case__ , size=snake_case__ , resample=snake_case__ ) for image in images]
if do_center_crop:
lowerCAmelCase : List[str] = [self.center_crop(image=snake_case__ , size=snake_case__ ) for image in images]
if do_rescale:
lowerCAmelCase : List[str] = [self.rescale(image=snake_case__ , scale=snake_case__ ) for image in images]
if do_normalize:
lowerCAmelCase : int = [self.normalize(image=snake_case__ , mean=snake_case__ , std=snake_case__ ) for image in images]
lowerCAmelCase : int = [to_channel_dimension_format(snake_case__ , snake_case__ ) for image in images]
lowerCAmelCase : int = {"pixel_values": images}
return BatchFeature(data=snake_case__ , tensor_type=snake_case__ )
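
# Arithmetic sketch (illustrative) of the shortest-edge resize used above: a
# 480 x 640 image with shortest_edge=224 keeps its aspect ratio. The exact
# rounding convention of get_resize_output_image_size is an assumption here.
_short, _long, _target = 480, 640, 224
assert (_target, int(_target * _long / _short)) == (224, 298)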
| 681 |
"""simple docstring"""
import copy
import inspect
import unittest
from transformers import AutoBackbone
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import require_timm, require_torch, torch_device
from transformers.utils.import_utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
if is_torch_available():
import torch
from transformers import TimmBackbone, TimmBackboneConfig
from ...test_pipeline_mixin import PipelineTesterMixin
class SCREAMING_SNAKE_CASE__ :
"""simple docstring"""
def __init__( self , snake_case__ , snake_case__=None , snake_case__=None , snake_case__=None , snake_case__="resnet50" , snake_case__=3 , snake_case__=32 , snake_case__=3 , snake_case__=True , snake_case__=True , ):
"""simple docstring"""
lowerCAmelCase : List[str] = parent
lowerCAmelCase : Union[str, Any] = out_indices if out_indices is not None else [4]
lowerCAmelCase : Tuple = stage_names
lowerCAmelCase : Any = out_features
lowerCAmelCase : Any = backbone
lowerCAmelCase : Union[str, Any] = batch_size
lowerCAmelCase : Optional[int] = image_size
lowerCAmelCase : List[str] = num_channels
lowerCAmelCase : int = use_pretrained_backbone
lowerCAmelCase : Tuple = is_training
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : Dict = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowerCAmelCase : Optional[int] = self.get_config()
return config, pixel_values
def lowercase__ ( self ):
"""simple docstring"""
return TimmBackboneConfig(
image_size=self.image_size , num_channels=self.num_channels , out_features=self.out_features , out_indices=self.out_indices , stage_names=self.stage_names , use_pretrained_backbone=self.use_pretrained_backbone , backbone=self.backbone , )
def lowercase__ ( self , snake_case__ , snake_case__ ):
"""simple docstring"""
lowerCAmelCase : List[Any] = TimmBackbone(config=snake_case__ )
model.to(snake_case__ )
model.eval()
with torch.no_grad():
lowerCAmelCase : Union[str, Any] = model(snake_case__ )
self.parent.assertEqual(
result.feature_maps[-1].shape , (self.batch_size, model.channels[-1], 14, 14) , )
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : Union[str, Any] = self.prepare_config_and_inputs()
lowerCAmelCase , lowerCAmelCase : Tuple = config_and_inputs
lowerCAmelCase : str = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
@require_timm
class SCREAMING_SNAKE_CASE__ ( lowercase , lowercase , lowercase , unittest.TestCase ):
"""simple docstring"""
a : Optional[int] =(TimmBackbone,) if is_torch_available() else ()
a : Union[str, Any] ={"feature-extraction": TimmBackbone} if is_torch_available() else {}
a : Tuple =False
a : List[Any] =False
a : Optional[Any] =False
a : Dict =False
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : Optional[Any] = TimmBackboneModelTester(self )
lowerCAmelCase : List[str] = ConfigTester(self , config_class=snake_case__ , has_text_modality=snake_case__ )
def lowercase__ ( self ):
"""simple docstring"""
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : str = "resnet18"
lowerCAmelCase : str = "microsoft/resnet-18"
lowerCAmelCase : List[Any] = AutoBackbone.from_pretrained(snake_case__ , use_timm_backbone=snake_case__ )
lowerCAmelCase : List[str] = AutoBackbone.from_pretrained(snake_case__ )
self.assertEqual(len(timm_model.out_features ) , len(transformers_model.out_features ) )
self.assertEqual(len(timm_model.stage_names ) , len(transformers_model.stage_names ) )
self.assertEqual(timm_model.channels , transformers_model.channels )
# Out indices are set to the last layer by default. For timm models, we don't know
# the number of layers in advance, so we set it to (-1,), whereas for transformers
# models, we set it to [len(stage_names) - 1] (kept for backward compatibility).
self.assertEqual(timm_model.out_indices , (-1,) )
self.assertEqual(transformers_model.out_indices , [len(timm_model.stage_names ) - 1] )
lowerCAmelCase : Union[str, Any] = AutoBackbone.from_pretrained(snake_case__ , use_timm_backbone=snake_case__ , out_indices=[1, 2, 3] )
lowerCAmelCase : List[Any] = AutoBackbone.from_pretrained(snake_case__ , out_indices=[1, 2, 3] )
self.assertEqual(timm_model.out_indices , transformers_model.out_indices )
self.assertEqual(len(timm_model.out_features ) , len(transformers_model.out_features ) )
self.assertEqual(timm_model.channels , transformers_model.channels )
@unittest.skip("TimmBackbone doesn't support feed forward chunking" )
def lowercase__ ( self ):
"""simple docstring"""
pass
@unittest.skip("TimmBackbone doesn't have num_hidden_layers attribute" )
def lowercase__ ( self ):
"""simple docstring"""
pass
@unittest.skip("TimmBackbone initialization is managed on the timm side" )
def lowercase__ ( self ):
"""simple docstring"""
pass
@unittest.skip("TimmBackbone models doesn't have inputs_embeds" )
def lowercase__ ( self ):
"""simple docstring"""
pass
@unittest.skip("TimmBackbone models doesn't have inputs_embeds" )
def lowercase__ ( self ):
"""simple docstring"""
pass
@unittest.skip("TimmBackbone model cannot be created without specifying a backbone checkpoint" )
def lowercase__ ( self ):
"""simple docstring"""
pass
@unittest.skip("Only checkpoints on timm can be loaded into TimmBackbone" )
def lowercase__ ( self ):
"""simple docstring"""
pass
@unittest.skip("model weights aren't tied in TimmBackbone." )
def lowercase__ ( self ):
"""simple docstring"""
pass
@unittest.skip("model weights aren't tied in TimmBackbone." )
def lowercase__ ( self ):
"""simple docstring"""
pass
@unittest.skip("Only checkpoints on timm can be loaded into TimmBackbone" )
def lowercase__ ( self ):
"""simple docstring"""
pass
@unittest.skip("Only checkpoints on timm can be loaded into TimmBackbone" )
def lowercase__ ( self ):
"""simple docstring"""
pass
@unittest.skip("TimmBackbone doesn't have hidden size info in its configuration." )
def lowercase__ ( self ):
"""simple docstring"""
pass
@unittest.skip("TimmBackbone doesn't support output_attentions." )
def lowercase__ ( self ):
"""simple docstring"""
pass
@unittest.skip("Safetensors is not supported by timm." )
def lowercase__ ( self ):
"""simple docstring"""
pass
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
def lowercase__ ( self ):
"""simple docstring"""
pass
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase , lowerCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCAmelCase : Dict = model_class(snake_case__ )
lowerCAmelCase : Any = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCAmelCase : Optional[int] = [*signature.parameters.keys()]
lowerCAmelCase : Any = ["pixel_values"]
self.assertListEqual(arg_names[:1] , snake_case__ )
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase , lowerCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
lowerCAmelCase : int = True
lowerCAmelCase : str = self.has_attentions
# no need to test all models as different heads yield the same functionality
lowerCAmelCase : Optional[int] = self.all_model_classes[0]
lowerCAmelCase : Union[str, Any] = model_class(snake_case__ )
model.to(snake_case__ )
lowerCAmelCase : Optional[int] = self._prepare_for_class(snake_case__ , snake_case__ )
lowerCAmelCase : Dict = model(**snake_case__ )
lowerCAmelCase : Tuple = outputs[0][-1]
# Encoder-/Decoder-only models
lowerCAmelCase : Optional[int] = outputs.hidden_states[0]
hidden_states.retain_grad()
if self.has_attentions:
lowerCAmelCase : Union[str, Any] = outputs.attentions[0]
attentions.retain_grad()
output.flatten()[0].backward(retain_graph=snake_case__ )
self.assertIsNotNone(hidden_states.grad )
if self.has_attentions:
self.assertIsNotNone(attentions.grad )
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase , lowerCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCAmelCase : Dict = model_class(snake_case__ )
model.to(snake_case__ )
model.eval()
lowerCAmelCase : List[str] = model(**snake_case__ )
self.assertEqual(len(result.feature_maps ) , len(config.out_indices ) )
self.assertEqual(len(model.channels ) , len(config.out_indices ) )
# Check output of last stage is taken if out_features=None, out_indices=None
lowerCAmelCase : Dict = copy.deepcopy(snake_case__ )
lowerCAmelCase : Optional[int] = None
lowerCAmelCase : Dict = model_class(snake_case__ )
model.to(snake_case__ )
model.eval()
lowerCAmelCase : Optional[int] = model(**snake_case__ )
self.assertEqual(len(result.feature_maps ) , 1 )
self.assertEqual(len(model.channels ) , 1 )
# Check backbone can be initialized with fresh weights
lowerCAmelCase : Optional[int] = copy.deepcopy(snake_case__ )
lowerCAmelCase : List[str] = False
lowerCAmelCase : int = model_class(snake_case__ )
model.to(snake_case__ )
model.eval()
lowerCAmelCase : Optional[Any] = model(**snake_case__ )
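
# Minimal sketch (illustrative) of the retain_grad pattern exercised in the
# training-gradient test above: gradients of non-leaf tensors are only kept
# when retain_grad() is called before backward().
_h = torch.randn(2, 3, requires_grad=True) * 2.0  # multiplying makes _h a non-leaf tensor
_h.retain_grad()
_h.sum().backward()
assert _h.grad is not None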
| 681 | 1 |
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast
from diffusers import DDIMScheduler, KandinskyInpaintPipeline, KandinskyPriorPipeline, UNetaDConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class SCREAMING_SNAKE_CASE__ ( lowercase , unittest.TestCase ):
"""simple docstring"""
a : List[str] =KandinskyInpaintPipeline
a : List[Any] =["prompt", "image_embeds", "negative_image_embeds", "image", "mask_image"]
a : List[str] =[
"prompt",
"negative_prompt",
"image_embeds",
"negative_image_embeds",
"image",
"mask_image",
]
a : Dict =[
"generator",
"height",
"width",
"latents",
"guidance_scale",
"negative_prompt",
"num_inference_steps",
"return_dict",
"guidance_scale",
"num_images_per_prompt",
"output_type",
"return_dict",
]
a : Optional[Any] =False
@property
def lowercase__ ( self ):
"""simple docstring"""
return 32
@property
def lowercase__ ( self ):
"""simple docstring"""
return 32
@property
def lowercase__ ( self ):
"""simple docstring"""
return self.time_input_dim
@property
def lowercase__ ( self ):
"""simple docstring"""
return self.time_input_dim * 4
@property
def lowercase__ ( self ):
"""simple docstring"""
return 100
@property
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : int = XLMRobertaTokenizerFast.from_pretrained("YiYiXu/tiny-random-mclip-base" )
return tokenizer
@property
def lowercase__ ( self ):
"""simple docstring"""
torch.manual_seed(0 )
lowerCAmelCase : int = MCLIPConfig(
numDims=self.cross_attention_dim , transformerDimensions=self.text_embedder_hidden_size , hidden_size=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=1_005 , )
lowerCAmelCase : List[str] = MultilingualCLIP(snake_case__ )
lowerCAmelCase : List[Any] = text_encoder.eval()
return text_encoder
@property
def lowercase__ ( self ):
"""simple docstring"""
torch.manual_seed(0 )
lowerCAmelCase : str = {
"in_channels": 9,
            # Out channels is double the in channels because the model predicts both mean and variance
"out_channels": 8,
"addition_embed_type": "text_image",
"down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
"up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
"mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
"block_out_channels": (self.block_out_channels_a, self.block_out_channels_a * 2),
"layers_per_block": 1,
"encoder_hid_dim": self.text_embedder_hidden_size,
"encoder_hid_dim_type": "text_image_proj",
"cross_attention_dim": self.cross_attention_dim,
"attention_head_dim": 4,
"resnet_time_scale_shift": "scale_shift",
"class_embed_type": None,
}
lowerCAmelCase : Union[str, Any] = UNetaDConditionModel(**snake_case__ )
return model
@property
def lowercase__ ( self ):
"""simple docstring"""
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def lowercase__ ( self ):
"""simple docstring"""
torch.manual_seed(0 )
lowerCAmelCase : int = VQModel(**self.dummy_movq_kwargs )
return model
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : Union[str, Any] = self.dummy_text_encoder
lowerCAmelCase : Optional[Any] = self.dummy_tokenizer
lowerCAmelCase : str = self.dummy_unet
lowerCAmelCase : List[Any] = self.dummy_movq
lowerCAmelCase : Tuple = DDIMScheduler(
num_train_timesteps=1_000 , beta_schedule="linear" , beta_start=0.00085 , beta_end=0.012 , clip_sample=snake_case__ , set_alpha_to_one=snake_case__ , steps_offset=1 , prediction_type="epsilon" , thresholding=snake_case__ , )
lowerCAmelCase : Tuple = {
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"unet": unet,
"scheduler": scheduler,
"movq": movq,
}
return components
def lowercase__ ( self , snake_case__ , snake_case__=0 ):
"""simple docstring"""
lowerCAmelCase : int = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(snake_case__ ) ).to(snake_case__ )
lowerCAmelCase : str = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(seed + 1 ) ).to(snake_case__ )
# create init_image
lowerCAmelCase : Tuple = floats_tensor((1, 3, 64, 64) , rng=random.Random(snake_case__ ) ).to(snake_case__ )
lowerCAmelCase : Dict = image.cpu().permute(0 , 2 , 3 , 1 )[0]
lowerCAmelCase : Dict = Image.fromarray(np.uinta(snake_case__ ) ).convert("RGB" ).resize((256, 256) )
# create mask
lowerCAmelCase : str = np.ones((64, 64) , dtype=np.floataa )
lowerCAmelCase : Union[str, Any] = 0
if str(snake_case__ ).startswith("mps" ):
lowerCAmelCase : Optional[Any] = torch.manual_seed(snake_case__ )
else:
lowerCAmelCase : int = torch.Generator(device=snake_case__ ).manual_seed(snake_case__ )
lowerCAmelCase : Any = {
"prompt": "horse",
"image": init_image,
"mask_image": mask,
"image_embeds": image_embeds,
"negative_image_embeds": negative_image_embeds,
"generator": generator,
"height": 64,
"width": 64,
"num_inference_steps": 2,
"guidance_scale": 4.0,
"output_type": "np",
}
return inputs
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : List[Any] = "cpu"
lowerCAmelCase : List[str] = self.get_dummy_components()
lowerCAmelCase : Any = self.pipeline_class(**snake_case__ )
lowerCAmelCase : Dict = pipe.to(snake_case__ )
pipe.set_progress_bar_config(disable=snake_case__ )
lowerCAmelCase : Optional[int] = pipe(**self.get_dummy_inputs(snake_case__ ) )
lowerCAmelCase : Dict = output.images
lowerCAmelCase : List[Any] = pipe(
**self.get_dummy_inputs(snake_case__ ) , return_dict=snake_case__ , )[0]
lowerCAmelCase : Dict = image[0, -3:, -3:, -1]
lowerCAmelCase : str = image_from_tuple[0, -3:, -3:, -1]
print(f"""image.shape {image.shape}""" )
assert image.shape == (1, 64, 64, 3)
lowerCAmelCase : Any = np.array(
[0.8326919, 0.73790467, 0.20918581, 0.9309612, 0.5511791, 0.43713328, 0.5513321, 0.49922934, 0.59497786] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
), f""" expected_slice {expected_slice}, but got {image_slice.flatten()}"""
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
), f""" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"""
def lowercase__ ( self ):
"""simple docstring"""
super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
@slow
@require_torch_gpu
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
"""simple docstring"""
def lowercase__ ( self ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : Optional[Any] = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/kandinsky/kandinsky_inpaint_cat_with_hat_fp16.npy" )
lowerCAmelCase : Optional[Any] = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png" )
lowerCAmelCase : Optional[int] = np.ones((768, 768) , dtype=np.floataa )
lowerCAmelCase : Optional[Any] = 0
lowerCAmelCase : str = "a hat"
lowerCAmelCase : int = KandinskyPriorPipeline.from_pretrained(
"kandinsky-community/kandinsky-2-1-prior" , torch_dtype=torch.floataa )
pipe_prior.to(snake_case__ )
lowerCAmelCase : Union[str, Any] = KandinskyInpaintPipeline.from_pretrained(
"kandinsky-community/kandinsky-2-1-inpaint" , torch_dtype=torch.floataa )
lowerCAmelCase : List[Any] = pipeline.to(snake_case__ )
pipeline.set_progress_bar_config(disable=snake_case__ )
lowerCAmelCase : List[Any] = torch.Generator(device="cpu" ).manual_seed(0 )
lowerCAmelCase , lowerCAmelCase : Tuple = pipe_prior(
snake_case__ , generator=snake_case__ , num_inference_steps=5 , negative_prompt="" , ).to_tuple()
lowerCAmelCase : List[Any] = pipeline(
snake_case__ , image=snake_case__ , mask_image=snake_case__ , image_embeds=snake_case__ , negative_image_embeds=snake_case__ , generator=snake_case__ , num_inference_steps=100 , height=768 , width=768 , output_type="np" , )
lowerCAmelCase : List[str] = output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(snake_case__ , snake_case__ )
| 681 |
"""simple docstring"""
import argparse
import os
import re
lowerCAmelCase__ = '''src/transformers'''
# Pattern that looks at the indentation in a line.
lowerCAmelCase__ = re.compile(r'''^(\s*)\S''')
# Pattern that matches `"key":" and puts `key` in group 0.
lowerCAmelCase__ = re.compile(r'''^\s*"([^"]+)":''')
# Pattern that matches `_import_structure["key"]` and puts `key` in group 0.
lowerCAmelCase__ = re.compile(r'''^\s*_import_structure\["([^"]+)"\]''')
# Pattern that matches `"key",` and puts `key` in group 0.
lowerCAmelCase__ = re.compile(r'''^\s*"([^"]+)",\s*$''')
# Pattern that matches any `[stuff]` and puts `stuff` in group 0.
lowerCAmelCase__ = re.compile(r'''\[([^\]]+)\]''')
def a__ ( SCREAMING_SNAKE_CASE : List[str] ):
'''simple docstring'''
lowerCAmelCase : Union[str, Any] = _re_indent.search(SCREAMING_SNAKE_CASE )
return "" if search is None else search.groups()[0]
def a__ ( SCREAMING_SNAKE_CASE : List[Any] , SCREAMING_SNAKE_CASE : Optional[int]="" , SCREAMING_SNAKE_CASE : Optional[int]=None , SCREAMING_SNAKE_CASE : Dict=None ):
'''simple docstring'''
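    # Splits `code` into blocks at the given indent level; anything before
    # `start_prompt` and after `end_prompt` is kept intact as its own block.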
lowerCAmelCase : Tuple = 0
lowerCAmelCase : Optional[int] = code.split("\n" )
if start_prompt is not None:
while not lines[index].startswith(SCREAMING_SNAKE_CASE ):
index += 1
lowerCAmelCase : Dict = ["\n".join(lines[:index] )]
else:
lowerCAmelCase : Any = []
# We split into blocks until we get to the `end_prompt` (or the end of the block).
lowerCAmelCase : Union[str, Any] = [lines[index]]
index += 1
while index < len(SCREAMING_SNAKE_CASE ) and (end_prompt is None or not lines[index].startswith(SCREAMING_SNAKE_CASE )):
if len(lines[index] ) > 0 and get_indent(lines[index] ) == indent_level:
if len(SCREAMING_SNAKE_CASE ) > 0 and get_indent(current_block[-1] ).startswith(indent_level + " " ):
current_block.append(lines[index] )
blocks.append("\n".join(SCREAMING_SNAKE_CASE ) )
if index < len(SCREAMING_SNAKE_CASE ) - 1:
lowerCAmelCase : List[str] = [lines[index + 1]]
index += 1
else:
lowerCAmelCase : Optional[Any] = []
else:
blocks.append("\n".join(SCREAMING_SNAKE_CASE ) )
lowerCAmelCase : str = [lines[index]]
else:
current_block.append(lines[index] )
index += 1
# Adds current block if it's nonempty.
if len(SCREAMING_SNAKE_CASE ) > 0:
blocks.append("\n".join(SCREAMING_SNAKE_CASE ) )
# Add final block after end_prompt if provided.
if end_prompt is not None and index < len(SCREAMING_SNAKE_CASE ):
blocks.append("\n".join(lines[index:] ) )
return blocks
def a__ ( SCREAMING_SNAKE_CASE : int ):
'''simple docstring'''
def _inner(SCREAMING_SNAKE_CASE : Optional[Any] ):
return key(SCREAMING_SNAKE_CASE ).lower().replace("_" , "" )
return _inner
def a__ ( SCREAMING_SNAKE_CASE : Optional[int] , SCREAMING_SNAKE_CASE : Dict=None ):
'''simple docstring'''
def noop(SCREAMING_SNAKE_CASE : List[Any] ):
return x
if key is None:
lowerCAmelCase : int = noop
# Constants are all uppercase, they go first.
lowerCAmelCase : Dict = [obj for obj in objects if key(SCREAMING_SNAKE_CASE ).isupper()]
# Classes are not all uppercase but start with a capital, they go second.
lowerCAmelCase : List[Any] = [obj for obj in objects if key(SCREAMING_SNAKE_CASE )[0].isupper() and not key(SCREAMING_SNAKE_CASE ).isupper()]
# Functions begin with a lowercase, they go last.
lowerCAmelCase : List[Any] = [obj for obj in objects if not key(SCREAMING_SNAKE_CASE )[0].isupper()]
lowerCAmelCase : Dict = ignore_underscore(SCREAMING_SNAKE_CASE )
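    # e.g. ["foo_bar", "FooBar", "FOO_BAR"] -> ["FOO_BAR", "FooBar", "foo_bar"]:
    # constants first, then classes, then functions, each group sorted ignoring underscores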
return sorted(SCREAMING_SNAKE_CASE , key=SCREAMING_SNAKE_CASE ) + sorted(SCREAMING_SNAKE_CASE , key=SCREAMING_SNAKE_CASE ) + sorted(SCREAMING_SNAKE_CASE , key=SCREAMING_SNAKE_CASE )
def a__ ( SCREAMING_SNAKE_CASE : List[str] ):
'''simple docstring'''
def _replace(SCREAMING_SNAKE_CASE : List[Any] ):
lowerCAmelCase : List[str] = match.groups()[0]
if "," not in imports:
return f"""[{imports}]"""
lowerCAmelCase : Dict = [part.strip().replace("\"" , "" ) for part in imports.split("," )]
# We will have a final empty element if the line finished with a comma.
if len(keys[-1] ) == 0:
lowerCAmelCase : Any = keys[:-1]
return "[" + ", ".join([f"""\"{k}\"""" for k in sort_objects(SCREAMING_SNAKE_CASE )] ) + "]"
lowerCAmelCase : List[Any] = import_statement.split("\n" )
if len(SCREAMING_SNAKE_CASE ) > 3:
# Here we have to sort internal imports that are on several lines (one per name):
# key: [
# "object1",
# "object2",
# ...
# ]
# We may have to ignore one or two lines on each side.
lowerCAmelCase : Tuple = 2 if lines[1].strip() == "[" else 1
lowerCAmelCase : Optional[Any] = [(i, _re_strip_line.search(SCREAMING_SNAKE_CASE ).groups()[0]) for i, line in enumerate(lines[idx:-idx] )]
lowerCAmelCase : Optional[Any] = sort_objects(SCREAMING_SNAKE_CASE , key=lambda SCREAMING_SNAKE_CASE : x[1] )
lowerCAmelCase : List[str] = [lines[x[0] + idx] for x in sorted_indices]
return "\n".join(lines[:idx] + sorted_lines + lines[-idx:] )
elif len(SCREAMING_SNAKE_CASE ) == 3:
# Here we have to sort internal imports that are on one separate line:
# key: [
# "object1", "object2", ...
# ]
if _re_bracket_content.search(lines[1] ) is not None:
lowerCAmelCase : Optional[int] = _re_bracket_content.sub(_replace , lines[1] )
else:
lowerCAmelCase : List[str] = [part.strip().replace("\"" , "" ) for part in lines[1].split("," )]
# We will have a final empty element if the line finished with a comma.
if len(keys[-1] ) == 0:
lowerCAmelCase : Union[str, Any] = keys[:-1]
lowerCAmelCase : str = get_indent(lines[1] ) + ", ".join([f"""\"{k}\"""" for k in sort_objects(SCREAMING_SNAKE_CASE )] )
return "\n".join(SCREAMING_SNAKE_CASE )
else:
# Finally we have to deal with imports fitting on one line
lowerCAmelCase : Any = _re_bracket_content.sub(_replace , SCREAMING_SNAKE_CASE )
return import_statement
def a__ ( SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : Tuple=True ):
'''simple docstring'''
with open(SCREAMING_SNAKE_CASE , encoding="utf-8" ) as f:
lowerCAmelCase : Union[str, Any] = f.read()
if "_import_structure" not in code:
return
# Blocks of indent level 0
lowerCAmelCase : List[str] = split_code_in_indented_blocks(
SCREAMING_SNAKE_CASE , start_prompt="_import_structure = {" , end_prompt="if TYPE_CHECKING:" )
    # We ignore block 0 (everything until start_prompt) and the last block (everything after end_prompt).
for block_idx in range(1 , len(SCREAMING_SNAKE_CASE ) - 1 ):
# Check if the block contains some `_import_structure`s thingy to sort.
lowerCAmelCase : Tuple = main_blocks[block_idx]
lowerCAmelCase : Optional[Any] = block.split("\n" )
# Get to the start of the imports.
lowerCAmelCase : int = 0
while line_idx < len(SCREAMING_SNAKE_CASE ) and "_import_structure" not in block_lines[line_idx]:
# Skip dummy import blocks
if "import dummy" in block_lines[line_idx]:
lowerCAmelCase : Optional[Any] = len(SCREAMING_SNAKE_CASE )
else:
line_idx += 1
if line_idx >= len(SCREAMING_SNAKE_CASE ):
continue
# Ignore beginning and last line: they don't contain anything.
lowerCAmelCase : Optional[Any] = "\n".join(block_lines[line_idx:-1] )
lowerCAmelCase : Dict = get_indent(block_lines[1] )
        # Split the internal block into blocks of indent level 1.
lowerCAmelCase : Optional[Any] = split_code_in_indented_blocks(SCREAMING_SNAKE_CASE , indent_level=SCREAMING_SNAKE_CASE )
# We have two categories of import key: list or _import_structure[key].append/extend
lowerCAmelCase : Tuple = _re_direct_key if "_import_structure = {" in block_lines[0] else _re_indirect_key
# Grab the keys, but there is a trap: some lines are empty or just comments.
lowerCAmelCase : Tuple = [(pattern.search(SCREAMING_SNAKE_CASE ).groups()[0] if pattern.search(SCREAMING_SNAKE_CASE ) is not None else None) for b in internal_blocks]
# We only sort the lines with a key.
lowerCAmelCase : int = [(i, key) for i, key in enumerate(SCREAMING_SNAKE_CASE ) if key is not None]
lowerCAmelCase : Union[str, Any] = [x[0] for x in sorted(SCREAMING_SNAKE_CASE , key=lambda SCREAMING_SNAKE_CASE : x[1] )]
        # We reorder the blocks, leaving empty lines/comments where they were and sorting the rest.
lowerCAmelCase : Tuple = 0
lowerCAmelCase : Any = []
for i in range(len(SCREAMING_SNAKE_CASE ) ):
if keys[i] is None:
reorderded_blocks.append(internal_blocks[i] )
else:
lowerCAmelCase : Dict = sort_objects_in_import(internal_blocks[sorted_indices[count]] )
reorderded_blocks.append(SCREAMING_SNAKE_CASE )
count += 1
# And we put our main block back together with its first and last line.
lowerCAmelCase : List[Any] = "\n".join(block_lines[:line_idx] + reorderded_blocks + [block_lines[-1]] )
if code != "\n".join(SCREAMING_SNAKE_CASE ):
if check_only:
return True
else:
print(f"""Overwriting {file}.""" )
with open(SCREAMING_SNAKE_CASE , "w" , encoding="utf-8" ) as f:
f.write("\n".join(SCREAMING_SNAKE_CASE ) )
def a__ ( SCREAMING_SNAKE_CASE : List[str]=True ):
'''simple docstring'''
lowerCAmelCase : Optional[int] = []
for root, _, files in os.walk(SCREAMING_SNAKE_CASE ):
if "__init__.py" in files:
lowerCAmelCase : Tuple = sort_imports(os.path.join(SCREAMING_SNAKE_CASE , "__init__.py" ) , check_only=SCREAMING_SNAKE_CASE )
if result:
lowerCAmelCase : Optional[Any] = [os.path.join(SCREAMING_SNAKE_CASE , "__init__.py" )]
if len(SCREAMING_SNAKE_CASE ) > 0:
raise ValueError(f"""Would overwrite {len(SCREAMING_SNAKE_CASE )} files, run `make style`.""" )
if __name__ == "__main__":
lowerCAmelCase__ = argparse.ArgumentParser()
parser.add_argument('''--check_only''', action='''store_true''', help='''Whether to only check or fix style.''')
lowerCAmelCase__ = parser.parse_args()
sort_imports_in_all_inits(check_only=args.check_only)
| 681 | 1 |
"""simple docstring"""
import enum
import warnings
from .. import MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_CAUSAL_LM_MAPPING
from ..utils import add_end_docstrings, is_tf_available
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
class SCREAMING_SNAKE_CASE__ ( enum.Enum ):
"""simple docstring"""
a : int =0
a : Dict =1
a : str =2
@add_end_docstrings(lowercase )
class SCREAMING_SNAKE_CASE__ ( lowercase ):
"""simple docstring"""
a : Dict ="\n In 1991, the remains of Russian Tsar Nicholas II and his family (except for Alexei and Maria) are discovered. The\n voice of Nicholas's young son, Tsarevich Alexei Nikolaevich, narrates the remainder of the story. 1883 Western\n Siberia, a young Grigori Rasputin is asked by his father and a group of men to perform magic. Rasputin has a vision\n and denounces one of the men as a horse thief. Although his father initially slaps him for making such an\n accusation, Rasputin watches as the man is chased outside and beaten. Twenty years later, Rasputin sees a vision of\n the Virgin Mary, prompting him to become a priest. Rasputin quickly becomes famous, with people, even a bishop,\n begging for his blessing. <eod> </s> <eos>\n "
def __init__( self , *snake_case__ , **snake_case__ ):
"""simple docstring"""
super().__init__(*snake_case__ , **snake_case__ )
self.check_model_type(
TF_MODEL_FOR_CAUSAL_LM_MAPPING if self.framework == "tf" else MODEL_FOR_CAUSAL_LM_MAPPING )
if "prefix" not in self._preprocess_params:
# This is very specific. The logic is quite complex and needs to be done
# as a "default".
# It also defines both some preprocess_kwargs and generate_kwargs
# which is why we cannot put them in their respective methods.
lowerCAmelCase : Optional[int] = None
if self.model.config.prefix is not None:
lowerCAmelCase : int = self.model.config.prefix
if prefix is None and self.model.__class__.__name__ in [
"XLNetLMHeadModel",
"TransfoXLLMHeadModel",
"TFXLNetLMHeadModel",
"TFTransfoXLLMHeadModel",
]:
# For XLNet and TransformerXL we add an article to the prompt to give more state to the model.
lowerCAmelCase : List[Any] = self.XL_PREFIX
if prefix is not None:
# Recalculate some generate_kwargs linked to prefix.
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase : Any = self._sanitize_parameters(prefix=snake_case__ , **self._forward_params )
lowerCAmelCase : str = {**self._preprocess_params, **preprocess_params}
lowerCAmelCase : Tuple = {**self._forward_params, **forward_params}
def lowercase__ ( self , snake_case__=None , snake_case__=None , snake_case__=None , snake_case__=None , snake_case__=None , snake_case__=None , snake_case__=None , snake_case__=None , **snake_case__ , ):
"""simple docstring"""
lowerCAmelCase : Tuple = {}
if prefix is not None:
lowerCAmelCase : List[str] = prefix
if prefix:
lowerCAmelCase : Union[str, Any] = self.tokenizer(
snake_case__ , padding=snake_case__ , add_special_tokens=snake_case__ , return_tensors=self.framework )
lowerCAmelCase : int = prefix_inputs["input_ids"].shape[-1]
if handle_long_generation is not None:
if handle_long_generation not in {"hole"}:
raise ValueError(
f"""{handle_long_generation} is not a valid value for `handle_long_generation` parameter expected"""
" [None, 'hole']" )
lowerCAmelCase : Tuple = handle_long_generation
preprocess_params.update(snake_case__ )
lowerCAmelCase : List[str] = generate_kwargs
lowerCAmelCase : List[Any] = {}
if return_full_text is not None and return_type is None:
if return_text is not None:
raise ValueError("`return_text` is mutually exclusive with `return_full_text`" )
if return_tensors is not None:
raise ValueError("`return_full_text` is mutually exclusive with `return_tensors`" )
lowerCAmelCase : int = ReturnType.FULL_TEXT if return_full_text else ReturnType.NEW_TEXT
if return_tensors is not None and return_type is None:
if return_text is not None:
raise ValueError("`return_text` is mutually exclusive with `return_tensors`" )
lowerCAmelCase : Tuple = ReturnType.TENSORS
if return_type is not None:
lowerCAmelCase : Union[str, Any] = return_type
if clean_up_tokenization_spaces is not None:
lowerCAmelCase : Optional[Any] = clean_up_tokenization_spaces
if stop_sequence is not None:
lowerCAmelCase : str = self.tokenizer.encode(snake_case__ , add_special_tokens=snake_case__ )
if len(snake_case__ ) > 1:
warnings.warn(
"Stopping on a multiple token sequence is not yet supported on transformers. The first token of"
" the stop sequence will be used as the stop sequence string in the interim." )
lowerCAmelCase : int = stop_sequence_ids[0]
return preprocess_params, forward_params, postprocess_params
def lowercase__ ( self , *snake_case__ , **snake_case__ ):
"""simple docstring"""
if self.model.__class__.__name__ in ["TransfoXLLMHeadModel"]:
kwargs.update({"add_space_before_punct_symbol": True} )
return super()._parse_and_tokenize(*snake_case__ , **snake_case__ )
def __call__( self , snake_case__ , **snake_case__ ):
"""simple docstring"""
return super().__call__(snake_case__ , **snake_case__ )
def lowercase__ ( self , snake_case__ , snake_case__="" , snake_case__=None , **snake_case__ ):
"""simple docstring"""
lowerCAmelCase : Optional[Any] = self.tokenizer(
prefix + prompt_text , padding=snake_case__ , add_special_tokens=snake_case__ , return_tensors=self.framework )
lowerCAmelCase : Tuple = prompt_text
if handle_long_generation == "hole":
lowerCAmelCase : Union[str, Any] = inputs["input_ids"].shape[-1]
if "max_new_tokens" in generate_kwargs:
lowerCAmelCase : List[Any] = generate_kwargs["max_new_tokens"]
else:
lowerCAmelCase : Optional[Any] = generate_kwargs.get("max_length" , self.model.config.max_length ) - cur_len
if new_tokens < 0:
raise ValueError("We cannot infer how many new tokens are expected" )
if cur_len + new_tokens > self.tokenizer.model_max_length:
lowerCAmelCase : Dict = self.tokenizer.model_max_length - new_tokens
if keep_length <= 0:
raise ValueError(
"We cannot use `hole` to handle this generation the number of desired tokens exceeds the"
" models max length" )
lowerCAmelCase : Optional[Any] = inputs["input_ids"][:, -keep_length:]
if "attention_mask" in inputs:
lowerCAmelCase : Optional[Any] = inputs["attention_mask"][:, -keep_length:]
return inputs
def lowercase__ ( self , snake_case__ , **snake_case__ ):
"""simple docstring"""
lowerCAmelCase : Any = model_inputs["input_ids"]
lowerCAmelCase : Optional[int] = model_inputs.get("attention_mask" , snake_case__ )
# Allow empty prompts
if input_ids.shape[1] == 0:
lowerCAmelCase : Dict = None
lowerCAmelCase : List[Any] = None
lowerCAmelCase : Optional[int] = 1
else:
lowerCAmelCase : List[str] = input_ids.shape[0]
lowerCAmelCase : int = model_inputs.pop("prompt_text" )
# If there is a prefix, we may need to adjust the generation length. Do so without permanently modifying
# generate_kwargs, as some of the parameterization may come from the initialization of the pipeline.
lowerCAmelCase : List[Any] = generate_kwargs.pop("prefix_length" , 0 )
if prefix_length > 0:
lowerCAmelCase : Union[str, Any] = "max_new_tokens" in generate_kwargs or (
"generation_config" in generate_kwargs
and generate_kwargs["generation_config"].max_new_tokens is not None
)
if not has_max_new_tokens:
lowerCAmelCase : Tuple = generate_kwargs.get("max_length" ) or self.model.config.max_length
generate_kwargs["max_length"] += prefix_length
lowerCAmelCase : Tuple = "min_new_tokens" in generate_kwargs or (
"generation_config" in generate_kwargs
and generate_kwargs["generation_config"].min_new_tokens is not None
)
if not has_min_new_tokens and "min_length" in generate_kwargs:
generate_kwargs["min_length"] += prefix_length
        # BS x SL (batch size x sequence length)
lowerCAmelCase : Optional[Any] = self.model.generate(input_ids=snake_case__ , attention_mask=snake_case__ , **snake_case__ )
lowerCAmelCase : Any = generated_sequence.shape[0]
if self.framework == "pt":
lowerCAmelCase : Union[str, Any] = generated_sequence.reshape(snake_case__ , out_b // in_b , *generated_sequence.shape[1:] )
elif self.framework == "tf":
lowerCAmelCase : Optional[Any] = tf.reshape(snake_case__ , (in_b, out_b // in_b, *generated_sequence.shape[1:]) )
return {"generated_sequence": generated_sequence, "input_ids": input_ids, "prompt_text": prompt_text}
def lowercase__ ( self , snake_case__ , snake_case__=ReturnType.FULL_TEXT , snake_case__=True ):
"""simple docstring"""
lowerCAmelCase : Tuple = model_outputs["generated_sequence"][0]
lowerCAmelCase : Optional[Any] = model_outputs["input_ids"]
lowerCAmelCase : Optional[int] = model_outputs["prompt_text"]
lowerCAmelCase : Tuple = generated_sequence.numpy().tolist()
lowerCAmelCase : Union[str, Any] = []
for sequence in generated_sequence:
if return_type == ReturnType.TENSORS:
lowerCAmelCase : int = {"generated_token_ids": sequence}
elif return_type in {ReturnType.NEW_TEXT, ReturnType.FULL_TEXT}:
# Decode text
lowerCAmelCase : Union[str, Any] = self.tokenizer.decode(
snake_case__ , skip_special_tokens=snake_case__ , clean_up_tokenization_spaces=snake_case__ , )
# Remove PADDING prompt of the sequence if XLNet or Transfo-XL model is used
if input_ids is None:
lowerCAmelCase : Union[str, Any] = 0
else:
lowerCAmelCase : Any = len(
self.tokenizer.decode(
input_ids[0] , skip_special_tokens=snake_case__ , clean_up_tokenization_spaces=snake_case__ , ) )
if return_type == ReturnType.FULL_TEXT:
lowerCAmelCase : str = prompt_text + text[prompt_length:]
else:
lowerCAmelCase : Union[str, Any] = text[prompt_length:]
lowerCAmelCase : Any = {"generated_text": all_text}
records.append(snake_case__ )
return records
| 681 |
"""simple docstring"""
from __future__ import annotations
import inspect
import unittest
import numpy as np
from transformers import ResNetConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFResNetForImageClassification, TFResNetModel
from transformers.models.resnet.modeling_tf_resnet import TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class SCREAMING_SNAKE_CASE__ :
"""simple docstring"""
def __init__( self , snake_case__ , snake_case__=3 , snake_case__=32 , snake_case__=3 , snake_case__=10 , snake_case__=[10, 20, 30, 40] , snake_case__=[1, 1, 2, 1] , snake_case__=True , snake_case__=True , snake_case__="relu" , snake_case__=3 , snake_case__=None , ):
"""simple docstring"""
lowerCAmelCase : Optional[Any] = parent
lowerCAmelCase : List[Any] = batch_size
lowerCAmelCase : Union[str, Any] = image_size
lowerCAmelCase : Dict = num_channels
lowerCAmelCase : List[Any] = embeddings_size
lowerCAmelCase : List[Any] = hidden_sizes
lowerCAmelCase : Optional[int] = depths
lowerCAmelCase : str = is_training
lowerCAmelCase : List[str] = use_labels
lowerCAmelCase : List[Any] = hidden_act
lowerCAmelCase : Optional[Any] = num_labels
lowerCAmelCase : Tuple = scope
lowerCAmelCase : int = len(snake_case__ )
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : Optional[int] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowerCAmelCase : Optional[Any] = None
if self.use_labels:
lowerCAmelCase : Optional[int] = ids_tensor([self.batch_size] , self.num_labels )
lowerCAmelCase : List[str] = self.get_config()
return config, pixel_values, labels
def lowercase__ ( self ):
"""simple docstring"""
return ResNetConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , image_size=self.image_size , )
def lowercase__ ( self , snake_case__ , snake_case__ , snake_case__ ):
"""simple docstring"""
lowerCAmelCase : Tuple = TFResNetModel(config=snake_case__ )
lowerCAmelCase : Union[str, Any] = model(snake_case__ )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def lowercase__ ( self , snake_case__ , snake_case__ , snake_case__ ):
"""simple docstring"""
lowerCAmelCase : Dict = self.num_labels
lowerCAmelCase : str = TFResNetForImageClassification(snake_case__ )
lowerCAmelCase : int = model(snake_case__ , labels=snake_case__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : Union[str, Any] = self.prepare_config_and_inputs()
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase : Any = config_and_inputs
lowerCAmelCase : Optional[Any] = {"pixel_values": pixel_values}
return config, inputs_dict
@require_tf
class SCREAMING_SNAKE_CASE__ ( lowercase , lowercase , unittest.TestCase ):
"""simple docstring"""
a : Any =(TFResNetModel, TFResNetForImageClassification) if is_tf_available() else ()
a : Tuple =(
{"feature-extraction": TFResNetModel, "image-classification": TFResNetForImageClassification}
if is_tf_available()
else {}
)
a : int =False
a : List[str] =False
a : Optional[int] =False
a : Union[str, Any] =False
a : Any =False
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : Optional[Any] = TFResNetModelTester(self )
lowerCAmelCase : str = ConfigTester(self , config_class=snake_case__ , has_text_modality=snake_case__ )
def lowercase__ ( self ):
"""simple docstring"""
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def lowercase__ ( self ):
"""simple docstring"""
return
@unittest.skip(reason="ResNet does not use inputs_embeds" )
def lowercase__ ( self ):
"""simple docstring"""
pass
@unittest.skip(reason="ResNet does not support input and output embeddings" )
def lowercase__ ( self ):
"""simple docstring"""
pass
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase , lowerCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCAmelCase : List[str] = model_class(snake_case__ )
lowerCAmelCase : Optional[Any] = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCAmelCase : Dict = [*signature.parameters.keys()]
lowerCAmelCase : List[Any] = ["pixel_values"]
self.assertListEqual(arg_names[:1] , snake_case__ )
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*snake_case__ )
def lowercase__ ( self ):
"""simple docstring"""
def check_hidden_states_output(snake_case__ , snake_case__ , snake_case__ ):
lowerCAmelCase : int = model_class(snake_case__ )
lowerCAmelCase : Optional[Any] = model(**self._prepare_for_class(snake_case__ , snake_case__ ) )
lowerCAmelCase : List[str] = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
lowerCAmelCase : Tuple = self.model_tester.num_stages
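            # hidden_states contains the embedding output plus one entry per stage, hence the +1 below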
self.assertEqual(len(snake_case__ ) , expected_num_stages + 1 )
# ResNet's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
lowerCAmelCase , lowerCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
lowerCAmelCase : Any = ["basic", "bottleneck"]
for model_class in self.all_model_classes:
for layer_type in layers_type:
lowerCAmelCase : Optional[Any] = layer_type
lowerCAmelCase : Dict = True
check_hidden_states_output(snake_case__ , snake_case__ , snake_case__ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowerCAmelCase : List[Any] = True
check_hidden_states_output(snake_case__ , snake_case__ , snake_case__ )
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*snake_case__ )
@slow
def lowercase__ ( self ):
"""simple docstring"""
for model_name in TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCAmelCase : int = TFResNetModel.from_pretrained(snake_case__ )
self.assertIsNotNone(snake_case__ )
def a__ ( ):
'''simple docstring'''
lowerCAmelCase : str = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_tf
@require_vision
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def lowercase__ ( self ):
"""simple docstring"""
return (
AutoImageProcessor.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
if is_vision_available()
else None
)
@slow
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : int = TFResNetForImageClassification.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
lowerCAmelCase : Any = self.default_image_processor
lowerCAmelCase : Optional[Any] = prepare_img()
lowerCAmelCase : Dict = image_processor(images=snake_case__ , return_tensors="tf" )
# forward pass
lowerCAmelCase : str = model(**snake_case__ )
# verify the logits
lowerCAmelCase : str = tf.TensorShape((1, 1_000) )
self.assertEqual(outputs.logits.shape , snake_case__ )
lowerCAmelCase : str = tf.constant([-11.1069, -9.7877, -8.3777] )
self.assertTrue(np.allclose(outputs.logits[0, :3].numpy() , snake_case__ , atol=1e-4 ) )
| 681 | 1 |
"""simple docstring"""
import unittest
from transformers import AutoConfig, AutoTokenizer, BertConfig, TensorType, is_flax_available
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, require_flax, slow
if is_flax_available():
import jax
from transformers.models.auto.modeling_flax_auto import FlaxAutoModel
from transformers.models.bert.modeling_flax_bert import FlaxBertModel
from transformers.models.roberta.modeling_flax_roberta import FlaxRobertaModel
@require_flax
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
"""simple docstring"""
@slow
def lowercase__ ( self ):
"""simple docstring"""
for model_name in ["bert-base-cased", "bert-large-uncased"]:
with self.subTest(snake_case__ ):
lowerCAmelCase : Optional[int] = AutoConfig.from_pretrained(snake_case__ )
self.assertIsNotNone(snake_case__ )
self.assertIsInstance(snake_case__ , snake_case__ )
lowerCAmelCase : Dict = FlaxAutoModel.from_pretrained(snake_case__ )
self.assertIsNotNone(snake_case__ )
self.assertIsInstance(snake_case__ , snake_case__ )
@slow
def lowercase__ ( self ):
"""simple docstring"""
for model_name in ["roberta-base", "roberta-large"]:
with self.subTest(snake_case__ ):
lowerCAmelCase : Dict = AutoConfig.from_pretrained(snake_case__ )
self.assertIsNotNone(snake_case__ )
self.assertIsInstance(snake_case__ , snake_case__ )
lowerCAmelCase : Tuple = FlaxAutoModel.from_pretrained(snake_case__ )
self.assertIsNotNone(snake_case__ )
self.assertIsInstance(snake_case__ , snake_case__ )
@slow
def lowercase__ ( self ):
"""simple docstring"""
for model_name in ["bert-base-cased", "bert-large-uncased"]:
lowerCAmelCase : int = AutoTokenizer.from_pretrained(snake_case__ )
lowerCAmelCase : Dict = FlaxBertModel.from_pretrained(snake_case__ )
lowerCAmelCase : List[str] = tokenizer("Do you support jax jitted function?" , return_tensors=TensorType.JAX )
@jax.jit
def eval(**snake_case__ ):
return model(**snake_case__ )
eval(**snake_case__ ).block_until_ready()
@slow
def lowercase__ ( self ):
"""simple docstring"""
for model_name in ["roberta-base", "roberta-large"]:
lowerCAmelCase : str = AutoTokenizer.from_pretrained(snake_case__ )
lowerCAmelCase : List[str] = FlaxRobertaModel.from_pretrained(snake_case__ )
lowerCAmelCase : List[str] = tokenizer("Do you support jax jitted function?" , return_tensors=TensorType.JAX )
@jax.jit
def eval(**snake_case__ ):
return model(**snake_case__ )
eval(**snake_case__ ).block_until_ready()
def lowercase__ ( self ):
"""simple docstring"""
with self.assertRaisesRegex(
snake_case__ , "bert-base is not a local folder and is not a valid model identifier" ):
lowerCAmelCase : Union[str, Any] = FlaxAutoModel.from_pretrained("bert-base" )
def lowercase__ ( self ):
"""simple docstring"""
with self.assertRaisesRegex(
snake_case__ , r"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)" ):
lowerCAmelCase : Optional[Any] = FlaxAutoModel.from_pretrained(snake_case__ , revision="aaaaaa" )
def lowercase__ ( self ):
"""simple docstring"""
with self.assertRaisesRegex(
snake_case__ , "hf-internal-testing/config-no-model does not appear to have a file named flax_model.msgpack" , ):
lowerCAmelCase : List[str] = FlaxAutoModel.from_pretrained("hf-internal-testing/config-no-model" )
def lowercase__ ( self ):
"""simple docstring"""
with self.assertRaisesRegex(snake_case__ , "Use `from_pt=True` to load this model" ):
lowerCAmelCase : List[Any] = FlaxAutoModel.from_pretrained("hf-internal-testing/tiny-bert-pt-only" )
| 681 |
"""simple docstring"""
import random
from .binary_exp_mod import bin_exp_mod
def a__ ( SCREAMING_SNAKE_CASE : Any , SCREAMING_SNAKE_CASE : int=1_0_0_0 ):
'''simple docstring'''
if n < 2:
return False
if n % 2 == 0:
return n == 2
# this means n is odd
lowerCAmelCase : int = n - 1
lowerCAmelCase : Optional[int] = 0
while d % 2 == 0:
        d //= 2  # floor division keeps d an integer for the modular exponentiation
exp += 1
    # n - 1 = d * (2**exp)
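    # e.g. n = 13: n - 1 = 12 = 3 * 2**2, so d = 3 and exp = 2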
lowerCAmelCase : Optional[Any] = 0
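    # run `prec` witness rounds; a composite n survives each round with probability at most 1/4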
while count < prec:
lowerCAmelCase : List[str] = random.randint(2 , n - 1 )
lowerCAmelCase : Tuple = bin_exp_mod(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
if b != 1:
lowerCAmelCase : List[str] = True
for _ in range(SCREAMING_SNAKE_CASE ):
if b == n - 1:
lowerCAmelCase : List[str] = False
break
lowerCAmelCase : Optional[Any] = b * b
b %= n
if flag:
return False
count += 1
return True
if __name__ == "__main__":
lowerCAmelCase__ = abs(int(input('''Enter bound : ''').strip()))
print('''Here\'s the list of primes:''')
print(''', '''.join(str(i) for i in range(n + 1) if is_prime_big(i)))
| 681 | 1 |
"""simple docstring"""
def a__ ( SCREAMING_SNAKE_CASE : Dict ):
'''simple docstring'''
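    # Partition problem: split the array into two subsets whose sums are as close
    # as possible and return the minimum achievable difference of the two sums.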
lowerCAmelCase : List[str] = len(SCREAMING_SNAKE_CASE )
lowerCAmelCase : str = sum(SCREAMING_SNAKE_CASE )
lowerCAmelCase : List[str] = [[False for x in range(s + 1 )] for y in range(n + 1 )]
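    # dp[i][j] is True when some subset of the first i elements sums exactly to j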
for i in range(1 , n + 1 ):
lowerCAmelCase : List[Any] = True
for i in range(1 , s + 1 ):
lowerCAmelCase : int = False
for i in range(1 , n + 1 ):
for j in range(1 , s + 1 ):
            lowerCAmelCase : Dict = dp[i - 1][j]
if arr[i - 1] <= j:
lowerCAmelCase : Tuple = dp[i][j] or dp[i - 1][j - arr[i - 1]]
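    # the largest j <= s / 2 reachable as a subset sum gives the most balanced
    # split, so the minimum difference is s - 2 * j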
for j in range(int(s / 2 ) , -1 , -1 ):
if dp[n][j] is True:
lowerCAmelCase : int = s - 2 * j
break
return diff
| 681 |
"""simple docstring"""
# DISCLAIMER: This file is strongly influenced by https://github.com/ermongroup/ddim
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import flax
import jax
import jax.numpy as jnp
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils_flax import (
CommonSchedulerState,
FlaxKarrasDiffusionSchedulers,
FlaxSchedulerMixin,
FlaxSchedulerOutput,
add_noise_common,
get_velocity_common,
)
@flax.struct.dataclass
class SCREAMING_SNAKE_CASE__ :
"""simple docstring"""
a : CommonSchedulerState
# setable values
a : jnp.ndarray
a : jnp.ndarray
a : Optional[int] =None
@classmethod
def lowercase__ ( cls , snake_case__ , snake_case__ , snake_case__ ):
"""simple docstring"""
return cls(common=snake_case__ , init_noise_sigma=snake_case__ , timesteps=snake_case__ )
@dataclass
class SCREAMING_SNAKE_CASE__ ( lowercase ):
"""simple docstring"""
a : DDPMSchedulerState
class SCREAMING_SNAKE_CASE__ ( lowercase , lowercase ):
"""simple docstring"""
a : Union[str, Any] =[e.name for e in FlaxKarrasDiffusionSchedulers]
a : jnp.dtype
@property
def lowercase__ ( self ):
"""simple docstring"""
return True
@register_to_config
def __init__( self , snake_case__ = 1_000 , snake_case__ = 0.0001 , snake_case__ = 0.02 , snake_case__ = "linear" , snake_case__ = None , snake_case__ = "fixed_small" , snake_case__ = True , snake_case__ = "epsilon" , snake_case__ = jnp.floataa , ):
"""simple docstring"""
lowerCAmelCase : Any = dtype
def lowercase__ ( self , snake_case__ = None ):
"""simple docstring"""
if common is None:
lowerCAmelCase : Dict = CommonSchedulerState.create(self )
# standard deviation of the initial noise distribution
lowerCAmelCase : str = jnp.array(1.0 , dtype=self.dtype )
lowerCAmelCase : Any = jnp.arange(0 , self.config.num_train_timesteps ).round()[::-1]
return DDPMSchedulerState.create(
common=snake_case__ , init_noise_sigma=snake_case__ , timesteps=snake_case__ , )
def lowercase__ ( self , snake_case__ , snake_case__ , snake_case__ = None ):
"""simple docstring"""
return sample
def lowercase__ ( self , snake_case__ , snake_case__ , snake_case__ = () ):
"""simple docstring"""
lowerCAmelCase : List[Any] = self.config.num_train_timesteps // num_inference_steps
# creates integer timesteps by multiplying by ratio
# rounding to avoid issues when num_inference_step is power of 3
lowerCAmelCase : Any = (jnp.arange(0 , snake_case__ ) * step_ratio).round()[::-1]
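        # e.g. num_train_timesteps=1000 and num_inference_steps=50 yields timesteps [980, 960, ..., 20, 0]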
return state.replace(
num_inference_steps=snake_case__ , timesteps=snake_case__ , )
def lowercase__ ( self , snake_case__ , snake_case__ , snake_case__=None , snake_case__=None ):
"""simple docstring"""
lowerCAmelCase : Optional[Any] = state.common.alphas_cumprod[t]
lowerCAmelCase : List[str] = jnp.where(t > 0 , state.common.alphas_cumprod[t - 1] , jnp.array(1.0 , dtype=self.dtype ) )
# For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf)
# and sample from it to get previous sample
# x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample
lowerCAmelCase : Union[str, Any] = (1 - alpha_prod_t_prev) / (1 - alpha_prod_t) * state.common.betas[t]
if variance_type is None:
lowerCAmelCase : List[str] = self.config.variance_type
# hacks - were probably added for training stability
if variance_type == "fixed_small":
lowerCAmelCase : List[Any] = jnp.clip(snake_case__ , a_min=1e-20 )
# for rl-diffuser https://arxiv.org/abs/2205.09991
elif variance_type == "fixed_small_log":
lowerCAmelCase : List[str] = jnp.log(jnp.clip(snake_case__ , a_min=1e-20 ) )
elif variance_type == "fixed_large":
lowerCAmelCase : Optional[int] = state.common.betas[t]
elif variance_type == "fixed_large_log":
# Glide max_log
lowerCAmelCase : List[str] = jnp.log(state.common.betas[t] )
elif variance_type == "learned":
return predicted_variance
elif variance_type == "learned_range":
lowerCAmelCase : List[str] = variance
lowerCAmelCase : Dict = state.common.betas[t]
lowerCAmelCase : Optional[Any] = (predicted_variance + 1) / 2
lowerCAmelCase : List[str] = frac * max_log + (1 - frac) * min_log
return variance
def lowercase__ ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ = None , snake_case__ = True , ):
"""simple docstring"""
lowerCAmelCase : Optional[Any] = timestep
if key is None:
lowerCAmelCase : Tuple = jax.random.PRNGKey(0 )
if model_output.shape[1] == sample.shape[1] * 2 and self.config.variance_type in ["learned", "learned_range"]:
lowerCAmelCase , lowerCAmelCase : Optional[Any] = jnp.split(snake_case__ , sample.shape[1] , axis=1 )
else:
lowerCAmelCase : Tuple = None
# 1. compute alphas, betas
lowerCAmelCase : Optional[int] = state.common.alphas_cumprod[t]
lowerCAmelCase : Optional[int] = jnp.where(t > 0 , state.common.alphas_cumprod[t - 1] , jnp.array(1.0 , dtype=self.dtype ) )
lowerCAmelCase : Dict = 1 - alpha_prod_t
lowerCAmelCase : Any = 1 - alpha_prod_t_prev
# 2. compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
if self.config.prediction_type == "epsilon":
lowerCAmelCase : Union[str, Any] = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
elif self.config.prediction_type == "sample":
lowerCAmelCase : List[Any] = model_output
elif self.config.prediction_type == "v_prediction":
lowerCAmelCase : Tuple = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output
else:
raise ValueError(
f"""prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample` """
" for the FlaxDDPMScheduler." )
# 3. Clip "predicted x_0"
if self.config.clip_sample:
lowerCAmelCase : Optional[int] = jnp.clip(snake_case__ , -1 , 1 )
# 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
lowerCAmelCase : List[Any] = (alpha_prod_t_prev ** 0.5 * state.common.betas[t]) / beta_prod_t
lowerCAmelCase : List[str] = state.common.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t
# 5. Compute predicted previous sample µ_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
lowerCAmelCase : int = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample
# 6. Add noise
def random_variance():
lowerCAmelCase : Tuple = jax.random.split(snake_case__ , num=1 )
lowerCAmelCase : str = jax.random.normal(snake_case__ , shape=model_output.shape , dtype=self.dtype )
return (self._get_variance(snake_case__ , snake_case__ , predicted_variance=snake_case__ ) ** 0.5) * noise
lowerCAmelCase : Union[str, Any] = jnp.where(t > 0 , random_variance() , jnp.zeros(model_output.shape , dtype=self.dtype ) )
lowerCAmelCase : Union[str, Any] = pred_prev_sample + variance
if not return_dict:
return (pred_prev_sample, state)
return FlaxDDPMSchedulerOutput(prev_sample=snake_case__ , state=snake_case__ )
def lowercase__ ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , ):
"""simple docstring"""
return add_noise_common(state.common , snake_case__ , snake_case__ , snake_case__ )
def lowercase__ ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , ):
"""simple docstring"""
return get_velocity_common(state.common , snake_case__ , snake_case__ , snake_case__ )
def __len__( self ):
"""simple docstring"""
return self.config.num_train_timesteps
| 681 | 1 |
"""simple docstring"""
import unittest
from transformers import BertGenerationTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
lowerCAmelCase__ = '''▁'''
lowerCAmelCase__ = get_tests_dir('''fixtures/test_sentencepiece.model''')
@require_sentencepiece
class SCREAMING_SNAKE_CASE__ ( lowercase , unittest.TestCase ):
"""simple docstring"""
a : Optional[Any] =BertGenerationTokenizer
a : Union[str, Any] =False
a : Tuple =True
def lowercase__ ( self ):
"""simple docstring"""
super().setUp()
lowerCAmelCase : Optional[int] = BertGenerationTokenizer(snake_case__ , keep_accents=snake_case__ )
tokenizer.save_pretrained(self.tmpdirname )
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : Optional[int] = "<s>"
lowerCAmelCase : List[Any] = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(snake_case__ ) , snake_case__ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(snake_case__ ) , snake_case__ )
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : Tuple = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , "<unk>" )
self.assertEqual(vocab_keys[1] , "<s>" )
self.assertEqual(vocab_keys[-1] , "<pad>" )
self.assertEqual(len(snake_case__ ) , 1_002 )
def lowercase__ ( self ):
"""simple docstring"""
self.assertEqual(self.get_tokenizer().vocab_size , 1_000 )
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : List[Any] = BertGenerationTokenizer(snake_case__ , keep_accents=snake_case__ )
lowerCAmelCase : Dict = tokenizer.tokenize("This is a test" )
self.assertListEqual(snake_case__ , ["▁This", "▁is", "▁a", "▁t", "est"] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(snake_case__ ) , [285, 46, 10, 170, 382] , )
lowerCAmelCase : Any = tokenizer.tokenize("I was born in 92000, and this is falsé." )
self.assertListEqual(
snake_case__ , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"é",
".",
] , )
lowerCAmelCase : Tuple = tokenizer.convert_tokens_to_ids(snake_case__ )
self.assertListEqual(
snake_case__ , [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4] , )
lowerCAmelCase : List[Any] = tokenizer.convert_ids_to_tokens(snake_case__ )
self.assertListEqual(
snake_case__ , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"<unk>",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"<unk>",
".",
] , )
@cached_property
def lowercase__ ( self ):
"""simple docstring"""
return BertGenerationTokenizer.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder" )
@slow
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : List[str] = "Hello World!"
lowerCAmelCase : Optional[int] = [18_536, 2_260, 101]
self.assertListEqual(snake_case__ , self.big_tokenizer.encode(snake_case__ ) )
@slow
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : List[Any] = (
"This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . Also we will"
" add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth"
)
lowerCAmelCase : Dict = [
871,
419,
358,
946,
991,
2_521,
452,
358,
1_357,
387,
7_751,
3_536,
112,
985,
456,
126,
865,
938,
5_400,
5_734,
458,
1_368,
467,
786,
2_462,
5_246,
1_159,
633,
865,
4_519,
457,
582,
852,
2_557,
427,
916,
508,
405,
34_324,
497,
391,
408,
11_342,
1_244,
385,
100,
938,
985,
456,
574,
362,
12_597,
3_200,
3_129,
1_172,
]
self.assertListEqual(snake_case__ , self.big_tokenizer.encode(snake_case__ ) )
@require_torch
@slow
def lowercase__ ( self ):
"""simple docstring"""
import torch
from transformers import BertGenerationConfig, BertGenerationEncoder
# Build sequence
lowerCAmelCase : List[str] = list(self.big_tokenizer.get_vocab().keys() )[:10]
lowerCAmelCase : str = " ".join(snake_case__ )
lowerCAmelCase : Dict = self.big_tokenizer.encode_plus(snake_case__ , return_tensors="pt" , return_token_type_ids=snake_case__ )
lowerCAmelCase : Any = self.big_tokenizer.batch_encode_plus(
[sequence + " " + sequence] , return_tensors="pt" , return_token_type_ids=snake_case__ )
lowerCAmelCase : Optional[int] = BertGenerationConfig()
lowerCAmelCase : int = BertGenerationEncoder(snake_case__ )
assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size
with torch.no_grad():
model(**snake_case__ )
model(**snake_case__ )
@slow
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : Any = {"input_ids": [[39_286, 458, 36_335, 2_001, 456, 13_073, 13_266, 455, 113, 7_746, 1_741, 11_157, 391, 13_073, 13_266, 455, 113, 3_967, 35_412, 113, 4_936, 109, 3_870, 2_377, 113, 30_084, 45_720, 458, 134, 17_496, 112, 503, 11_672, 113, 118, 112, 5_665, 13_347, 38_687, 112, 1_496, 31_389, 112, 3_268, 47_264, 134, 962, 112, 16_377, 8_035, 23_130, 430, 12_169, 15_518, 28_592, 458, 146, 41_697, 109, 391, 12_169, 15_518, 16_689, 458, 146, 41_358, 109, 452, 726, 4_034, 111, 763, 35_412, 5_082, 388, 1_903, 111, 9_051, 391, 2_870, 48_918, 1_900, 1_123, 550, 998, 112, 9_586, 15_985, 455, 391, 410, 22_955, 37_636, 114], [448, 17_496, 419, 3_663, 385, 763, 113, 27_533, 2_870, 3_283, 13_043, 1_639, 24_713, 523, 656, 24_013, 18_550, 2_521, 517, 27_014, 21_244, 420, 1_212, 1_465, 391, 927, 4_833, 388, 578, 11_786, 114, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [484, 2_169, 7_687, 21_932, 18_146, 726, 363, 17_032, 3_391, 114, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=snake_case__ , model_name="google/bert_for_seq_generation_L-24_bbc_encoder" , revision="c817d1fd1be2ffa69431227a1fe320544943d4db" , )
| 681 |
"""simple docstring"""
import argparse
from omegaconf import OmegaConf
import torch
from diffusers import DDIMScheduler, LDMPipeline, UNetLDMModel, VQModel
def a__ ( SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : Tuple , SCREAMING_SNAKE_CASE : Optional[int] ):
'''simple docstring'''
lowerCAmelCase : Tuple = OmegaConf.load(SCREAMING_SNAKE_CASE )
lowerCAmelCase : Union[str, Any] = torch.load(SCREAMING_SNAKE_CASE , map_location="cpu" )["model"]
lowerCAmelCase : int = list(state_dict.keys() )
# extract state_dict for VQVAE
lowerCAmelCase : Tuple = {}
lowerCAmelCase : Dict = "first_stage_model."
for key in keys:
if key.startswith(SCREAMING_SNAKE_CASE ):
lowerCAmelCase : List[str] = state_dict[key]
# extract state_dict for UNetLDM
lowerCAmelCase : List[Any] = {}
lowerCAmelCase : Tuple = "model.diffusion_model."
for key in keys:
if key.startswith(SCREAMING_SNAKE_CASE ):
lowerCAmelCase : str = state_dict[key]
lowerCAmelCase : List[str] = config.model.params.first_stage_config.params
lowerCAmelCase : List[Any] = config.model.params.unet_config.params
lowerCAmelCase : Union[str, Any] = VQModel(**SCREAMING_SNAKE_CASE ).eval()
vqvae.load_state_dict(SCREAMING_SNAKE_CASE )
lowerCAmelCase : Union[str, Any] = UNetLDMModel(**SCREAMING_SNAKE_CASE ).eval()
unet.load_state_dict(SCREAMING_SNAKE_CASE )
lowerCAmelCase : Optional[Any] = DDIMScheduler(
        num_train_timesteps=config.model.params.timesteps , beta_schedule="scaled_linear" , beta_start=config.model.params.linear_start , beta_end=config.model.params.linear_end , clip_sample=SCREAMING_SNAKE_CASE , )
lowerCAmelCase : Tuple = LDMPipeline(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
pipeline.save_pretrained(SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
lowerCAmelCase__ = argparse.ArgumentParser()
parser.add_argument('''--checkpoint_path''', type=str, required=True)
parser.add_argument('''--config_path''', type=str, required=True)
parser.add_argument('''--output_path''', type=str, required=True)
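    # illustrative invocation (script and file names are examples only):
    #   python convert_ldm_checkpoint.py --checkpoint_path model.ckpt --config_path config.yaml --output_path ./ldm-pipeline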
lowerCAmelCase__ = parser.parse_args()
convert_ldm_original(args.checkpoint_path, args.config_path, args.output_path)
| 681 | 1 |
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from ..models.auto import AutoProcessor
from ..models.vision_encoder_decoder import VisionEncoderDecoderModel
from ..utils import is_vision_available
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class SCREAMING_SNAKE_CASE__ ( lowercase ):
"""simple docstring"""
a : List[str] ="naver-clova-ix/donut-base-finetuned-docvqa"
a : List[Any] =(
"This is a tool that answers a question about an document (pdf). It takes an input named `document` which "
"should be the document containing the information, as well as a `question` that is the question about the "
"document. It returns a text that contains the answer to the question."
)
a : List[Any] ="document_qa"
a : Union[str, Any] =AutoProcessor
a : Optional[Any] =VisionEncoderDecoderModel
a : List[str] =["image", "text"]
a : Tuple =["text"]
def __init__( self , *snake_case__ , **snake_case__ ):
"""simple docstring"""
if not is_vision_available():
raise ValueError("Pillow must be installed to use the DocumentQuestionAnsweringTool." )
super().__init__(*snake_case__ , **snake_case__ )
def lowercase__ ( self , snake_case__ , snake_case__ ):
"""simple docstring"""
lowerCAmelCase : List[str] = "<s_docvqa><s_question>{user_input}</s_question><s_answer>"
lowerCAmelCase : Any = task_prompt.replace("{user_input}" , snake_case__ )
lowerCAmelCase : Any = self.pre_processor.tokenizer(
snake_case__ , add_special_tokens=snake_case__ , return_tensors="pt" ).input_ids
lowerCAmelCase : Optional[Any] = self.pre_processor(snake_case__ , return_tensors="pt" ).pixel_values
return {"decoder_input_ids": decoder_input_ids, "pixel_values": pixel_values}
def lowercase__ ( self , snake_case__ ):
"""simple docstring"""
return self.model.generate(
inputs["pixel_values"].to(self.device ) , decoder_input_ids=inputs["decoder_input_ids"].to(self.device ) , max_length=self.model.decoder.config.max_position_embeddings , early_stopping=snake_case__ , pad_token_id=self.pre_processor.tokenizer.pad_token_id , eos_token_id=self.pre_processor.tokenizer.eos_token_id , use_cache=snake_case__ , num_beams=1 , bad_words_ids=[[self.pre_processor.tokenizer.unk_token_id]] , return_dict_in_generate=snake_case__ , ).sequences
def lowercase__ ( self , snake_case__ ):
"""simple docstring"""
lowerCAmelCase : Union[str, Any] = self.pre_processor.batch_decode(snake_case__ )[0]
lowerCAmelCase : Dict = sequence.replace(self.pre_processor.tokenizer.eos_token , "" )
lowerCAmelCase : Dict = sequence.replace(self.pre_processor.tokenizer.pad_token , "" )
lowerCAmelCase : List[Any] = re.sub(r"<.*?>" , "" , snake_case__ , count=1 ).strip() # remove first task start token
lowerCAmelCase : Dict = self.pre_processor.tokenajson(snake_case__ )
return sequence["answer"]
| 681 |
"""simple docstring"""
def a__ ( SCREAMING_SNAKE_CASE : int = 1_0_0_0 ):
'''simple docstring'''
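    # Project Euler problem 1: sum of all natural numbers below the given limit
    # (default 1000) that are multiples of 3 or 5.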
return sum(e for e in range(3 , SCREAMING_SNAKE_CASE ) if e % 3 == 0 or e % 5 == 0 )
if __name__ == "__main__":
print(F"{solution() = }")
| 681 | 1 |
"""simple docstring"""
import comet # From: unbabel-comet
import torch
import datasets
lowerCAmelCase__ = datasets.logging.get_logger(__name__)
lowerCAmelCase__ = '''\
@inproceedings{rei-EtAl:2020:WMT,
author = {Rei, Ricardo and Stewart, Craig and Farinha, Ana C and Lavie, Alon},
title = {Unbabel\'s Participation in the WMT20 Metrics Shared Task},
booktitle = {Proceedings of the Fifth Conference on Machine Translation},
month = {November},
year = {2020},
address = {Online},
publisher = {Association for Computational Linguistics},
pages = {909--918},
}
@inproceedings{rei-etal-2020-comet,
title = "{COMET}: A Neural Framework for {MT} Evaluation",
author = "Rei, Ricardo and
Stewart, Craig and
Farinha, Ana C and
Lavie, Alon",
booktitle = "Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP)",
month = nov,
year = "2020",
address = "Online",
publisher = "Association for Computational Linguistics",
url = "https://www.aclweb.org/anthology/2020.emnlp-main.213",
pages = "2685--2702",
}
'''
lowerCAmelCase__ = '''\
Crosslingual Optimized Metric for Evaluation of Translation (COMET) is an open-source framework used to train Machine Translation metrics that achieve high levels of correlation with different types of human judgments (HTER, DA\'s or MQM).
With the release of the framework the authors also released fully trained models that were used to compete in the WMT20 Metrics Shared Task, achieving SOTA in that year\'s competition.
See the [README.md] file at https://unbabel.github.io/COMET/html/models.html for more information.
'''
lowerCAmelCase__ = '''
COMET score.
Args:
`sources` (list of str): Source sentences
`predictions` (list of str): candidate translations
`references` (list of str): reference translations
`gpus` (int): Number of GPUs to run COMET on; when None, defaults to 1 if CUDA is available, else 0
`progress_bar` (bool): Whether to display a progress bar while scoring
`model`: COMET model to be used. Will default to `wmt20-comet-da` if None.
Returns:
`mean_score`: Mean of the individual segment scores.
`scores`: List of segment-level scores.
Examples:
>>> comet_metric = datasets.load_metric(\'comet\')
>>> # comet_metric = load_metric(\'comet\', \'wmt20-comet-da\') # you can also choose which model to use
>>> source = ["Dem Feuer konnte Einhalt geboten werden", "Schulen und Kindergärten wurden eröffnet."]
>>> hypothesis = ["The fire could be stopped", "Schools and kindergartens were open"]
>>> reference = ["They were able to control the fire.", "Schools and kindergartens opened"]
>>> results = comet_metric.compute(predictions=hypothesis, references=reference, sources=source)
>>> print([round(v, 2) for v in results["scores"]])
[0.19, 0.92]
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class SCREAMING_SNAKE_CASE__ ( datasets.Metric ):
"""simple docstring"""
def lowercase__ ( self ):
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage="https://unbabel.github.io/COMET/html/index.html" , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"sources": datasets.Value("string" , id="sequence" ),
"predictions": datasets.Value("string" , id="sequence" ),
"references": datasets.Value("string" , id="sequence" ),
} ) , codebase_urls=["https://github.com/Unbabel/COMET"] , reference_urls=[
"https://github.com/Unbabel/COMET",
"https://www.aclweb.org/anthology/2020.emnlp-main.213/",
"http://www.statmt.org/wmt20/pdf/2020.wmt-1.101.pdf6",
] , )
def lowercase__ ( self , snake_case__ ):
"""simple docstring"""
if self.config_name == "default":
lowerCAmelCase : str = comet.load_from_checkpoint(comet.download_model("wmt20-comet-da" ) )
else:
lowerCAmelCase : Tuple = comet.load_from_checkpoint(comet.download_model(self.config_name ) )
def lowercase__ ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__=None , snake_case__=False ):
"""simple docstring"""
if gpus is None:
lowerCAmelCase : Dict = 1 if torch.cuda.is_available() else 0
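        # Reshape the column-wise inputs into the list of {src, mt, ref} dicts
        # that the COMET scorer expects, then predict segment-level scores.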
lowerCAmelCase : Optional[int] = {"src": sources, "mt": predictions, "ref": references}
lowerCAmelCase : List[Any] = [dict(zip(snake_case__ , snake_case__ ) ) for t in zip(*data.values() )]
lowerCAmelCase , lowerCAmelCase : Union[str, Any] = self.scorer.predict(snake_case__ , gpus=snake_case__ , progress_bar=snake_case__ )
return {"mean_score": mean_score, "scores": scores}
| 681 |
"""simple docstring"""
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized, parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv("TEST_SAGEMAKER" , "False" ) ) is not True , reason="Skipping test because should only be run when releasing minor transformers version" , )
@pytest.mark.usefixtures("sm_env" )
@parameterized_class(
[
{
"framework": "pytorch",
"script": "run_glue_model_parallelism.py",
"model_name_or_path": "roberta-large",
"instance_type": "ml.p3dn.24xlarge",
"results": {"train_runtime": 16_00, "eval_accuracy": 0.3, "eval_loss": 1.2},
},
{
"framework": "pytorch",
"script": "run_glue.py",
"model_name_or_path": "roberta-large",
"instance_type": "ml.p3dn.24xlarge",
"results": {"train_runtime": 16_00, "eval_accuracy": 0.3, "eval_loss": 1.2},
},
] )
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
"""simple docstring"""
def lowercase__ ( self ):
"""simple docstring"""
if self.framework == "pytorch":
subprocess.run(
f"""cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py""".split() , encoding="utf-8" , check=snake_case__ , )
assert hasattr(self , "env" )
def lowercase__ ( self , snake_case__ ):
"""simple docstring"""
lowerCAmelCase : Optional[Any] = {
"enabled": True,
"processes_per_host": 8,
}
lowerCAmelCase : List[Any] = {
"enabled": True,
"parameters": {
"microbatches": 4,
"placement_strategy": "spread",
"pipeline": "interleaved",
"optimize": "speed",
"partitions": 4,
"ddp": True,
},
}
lowerCAmelCase : List[Any] = {"smdistributed": {"modelparallel": smp_options}, "mpi": mpi_options}
lowerCAmelCase : Optional[Any] = "trainer" if self.script == "run_glue.py" else "smtrainer"
# creates estimator
return HuggingFace(
entry_point=self.script , source_dir=self.env.test_path , role=self.env.role , image_uri=self.env.image_uri , base_job_name=f"""{self.env.base_job_name}-{instance_count}-smp-{name_extension}""" , instance_count=snake_case__ , instance_type=self.instance_type , debugger_hook_config=snake_case__ , hyperparameters={
**self.env.hyperparameters,
"model_name_or_path": self.model_name_or_path,
"max_steps": 500,
} , metric_definitions=self.env.metric_definitions , distribution=snake_case__ , py_version="py36" , )
def lowercase__ ( self , snake_case__ ):
"""simple docstring"""
TrainingJobAnalytics(snake_case__ ).export_csv(f"""{self.env.test_path}/{job_name}_metrics.csv""" )
@parameterized.expand([(1,)] )
def lowercase__ ( self , snake_case__ ):
"""simple docstring"""
lowerCAmelCase : Dict = self.create_estimator(snake_case__ )
# run training
estimator.fit()
# result dataframe
lowerCAmelCase : Tuple = TrainingJobAnalytics(estimator.latest_training_job.name ).dataframe()
# extract kpis
lowerCAmelCase : Tuple = list(result_metrics_df[result_metrics_df.metric_name == "eval_accuracy"]["value"] )
lowerCAmelCase : Optional[int] = list(result_metrics_df[result_metrics_df.metric_name == "eval_loss"]["value"] )
# get train time from SageMaker job, this includes starting, preprocessing, stopping
lowerCAmelCase : Dict = (
Session().describe_training_job(estimator.latest_training_job.name ).get("TrainingTimeInSeconds" , 999_999 )
)
# assert kpis
assert train_runtime <= self.results["train_runtime"]
assert all(t >= self.results["eval_accuracy"] for t in eval_accuracy )
assert all(t <= self.results["eval_loss"] for t in eval_loss )
# dump tests result into json file to share in PR
with open(f"""{estimator.latest_training_job.name}.json""" , "w" ) as outfile:
json.dump({"train_time": train_runtime, "eval_accuracy": eval_accuracy, "eval_loss": eval_loss} , snake_case__ )
| 681 | 1 |
"""simple docstring"""
from __future__ import annotations
def a__ ( SCREAMING_SNAKE_CASE : list[int] , SCREAMING_SNAKE_CASE : int ):
'''simple docstring'''
if len(SCREAMING_SNAKE_CASE ) < k or k < 0:
raise ValueError("Invalid Input" )
lowerCAmelCase : Optional[int] = sum(array[:k] )
for i in range(len(SCREAMING_SNAKE_CASE ) - k ):
lowerCAmelCase : str = current_sum - array[i] + array[i + k]
lowerCAmelCase : Optional[int] = max(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
return max_sum
if __name__ == "__main__":
from doctest import testmod
from random import randint
testmod()
lowerCAmelCase__ = [randint(-1_000, 1_000) for i in range(100)]
lowerCAmelCase__ = randint(0, 110)
print(F"The maximum sum of {k} consecutive elements is {max_sum_in_array(array,k)}")
| 681 |
"""simple docstring"""
from math import factorial
def a__ ( SCREAMING_SNAKE_CASE : int = 1_0_0 ):
'''simple docstring'''
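    # Project Euler problem 20: sum of the decimal digits of n! (default n = 100).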
return sum(int(SCREAMING_SNAKE_CASE ) for x in str(factorial(SCREAMING_SNAKE_CASE ) ) )
if __name__ == "__main__":
print(solution(int(input('''Enter the Number: ''').strip())))
| 681 | 1 |
"""simple docstring"""
from __future__ import annotations
import math
def a__ ( SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : bool , SCREAMING_SNAKE_CASE : list[int] , SCREAMING_SNAKE_CASE : float ):
'''simple docstring'''
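    # Classic minimax over a perfect binary game tree: `scores` holds the leaf
    # values, `height` is log2(len(scores)), and maximizing / minimizing levels
    # alternate via the boolean flag.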
if depth < 0:
raise ValueError("Depth cannot be less than 0" )
if len(SCREAMING_SNAKE_CASE ) == 0:
raise ValueError("Scores cannot be empty" )
if depth == height:
return scores[node_index]
if is_max:
return max(
minimax(depth + 1 , node_index * 2 , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) , minimax(depth + 1 , node_index * 2 + 1 , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) , )
return min(
minimax(depth + 1 , node_index * 2 , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) , minimax(depth + 1 , node_index * 2 + 1 , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) , )
def a__ ( ):
'''simple docstring'''
lowerCAmelCase : Optional[int] = [9_0, 2_3, 6, 3_3, 2_1, 6_5, 1_2_3, 3_4_4_2_3]
lowerCAmelCase : Union[str, Any] = math.log(len(SCREAMING_SNAKE_CASE ) , 2 )
print("Optimal value : " , end="" )
print(minimax(0 , 0 , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 681 |
"""simple docstring"""
from typing import Any
class SCREAMING_SNAKE_CASE__ :
"""simple docstring"""
def __init__( self , snake_case__ ):
"""simple docstring"""
lowerCAmelCase : Dict = data
lowerCAmelCase : Any = None
def __repr__( self ):
"""simple docstring"""
return f"""Node({self.data})"""
class SCREAMING_SNAKE_CASE__ :
"""simple docstring"""
def __init__( self ):
"""simple docstring"""
lowerCAmelCase : Tuple = None
def __iter__( self ):
"""simple docstring"""
lowerCAmelCase : Any = self.head
while node:
yield node.data
lowerCAmelCase : Optional[int] = node.next
def __len__( self ):
"""simple docstring"""
return sum(1 for _ in self )
def __repr__( self ):
"""simple docstring"""
return "->".join([str(snake_case__ ) for item in self] )
def __getitem__( self , snake_case__ ):
"""simple docstring"""
if not 0 <= index < len(self ):
raise ValueError("list index out of range." )
for i, node in enumerate(self ):
if i == index:
return node
return None
def __setitem__( self , snake_case__ , snake_case__ ):
"""simple docstring"""
if not 0 <= index < len(self ):
raise ValueError("list index out of range." )
lowerCAmelCase : Union[str, Any] = self.head
for _ in range(snake_case__ ):
lowerCAmelCase : int = current.next
lowerCAmelCase : List[str] = data
def lowercase__ ( self , snake_case__ ):
"""simple docstring"""
self.insert_nth(len(self ) , snake_case__ )
def lowercase__ ( self , snake_case__ ):
"""simple docstring"""
self.insert_nth(0 , snake_case__ )
def lowercase__ ( self , snake_case__ , snake_case__ ):
"""simple docstring"""
if not 0 <= index <= len(self ):
raise IndexError("list index out of range" )
lowerCAmelCase : Optional[int] = Node(snake_case__ )
if self.head is None:
lowerCAmelCase : Any = new_node
elif index == 0:
lowerCAmelCase : Any = self.head # link new_node to head
lowerCAmelCase : Union[str, Any] = new_node
else:
lowerCAmelCase : List[str] = self.head
for _ in range(index - 1 ):
lowerCAmelCase : int = temp.next
lowerCAmelCase : int = temp.next
lowerCAmelCase : Dict = new_node
def lowercase__ ( self ): # print every node data
"""simple docstring"""
print(self )
def lowercase__ ( self ):
"""simple docstring"""
return self.delete_nth(0 )
def lowercase__ ( self ): # delete from tail
"""simple docstring"""
return self.delete_nth(len(self ) - 1 )
def lowercase__ ( self , snake_case__ = 0 ):
"""simple docstring"""
if not 0 <= index <= len(self ) - 1: # test if index is valid
raise IndexError("List index out of range." )
lowerCAmelCase : List[Any] = self.head # default first node
if index == 0:
lowerCAmelCase : Optional[int] = self.head.next
else:
lowerCAmelCase : List[str] = self.head
for _ in range(index - 1 ):
lowerCAmelCase : Union[str, Any] = temp.next
lowerCAmelCase : Optional[Any] = temp.next
lowerCAmelCase : Any = temp.next.next
return delete_node.data
def lowercase__ ( self ):
"""simple docstring"""
return self.head is None
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : str = None
lowerCAmelCase : Optional[int] = self.head
while current:
# Store the current node's next node.
lowerCAmelCase : List[Any] = current.next
# Make the current node's next point backwards
lowerCAmelCase : Dict = prev
# Make the previous node be the current node
lowerCAmelCase : List[str] = current
# Make the current node the next node (to progress iteration)
lowerCAmelCase : int = next_node
# Return prev in order to put the head at the end
lowerCAmelCase : Tuple = prev
def a__ ( ):
'''simple docstring'''
lowerCAmelCase : Tuple = LinkedList()
assert linked_list.is_empty() is True
assert str(SCREAMING_SNAKE_CASE ) == ""
try:
linked_list.delete_head()
raise AssertionError # This should not happen.
except IndexError:
assert True # This should happen.
try:
linked_list.delete_tail()
raise AssertionError # This should not happen.
except IndexError:
assert True # This should happen.
for i in range(1_0 ):
assert len(SCREAMING_SNAKE_CASE ) == i
linked_list.insert_nth(SCREAMING_SNAKE_CASE , i + 1 )
assert str(SCREAMING_SNAKE_CASE ) == "->".join(str(SCREAMING_SNAKE_CASE ) for i in range(1 , 1_1 ) )
linked_list.insert_head(0 )
linked_list.insert_tail(1_1 )
assert str(SCREAMING_SNAKE_CASE ) == "->".join(str(SCREAMING_SNAKE_CASE ) for i in range(0 , 1_2 ) )
assert linked_list.delete_head() == 0
assert linked_list.delete_nth(9 ) == 1_0
assert linked_list.delete_tail() == 1_1
assert len(SCREAMING_SNAKE_CASE ) == 9
assert str(SCREAMING_SNAKE_CASE ) == "->".join(str(SCREAMING_SNAKE_CASE ) for i in range(1 , 1_0 ) )
assert all(linked_list[i] == i + 1 for i in range(0 , 9 ) ) is True
for i in range(0 , 9 ):
lowerCAmelCase : Optional[Any] = -i
assert all(linked_list[i] == -i for i in range(0 , 9 ) ) is True
linked_list.reverse()
assert str(SCREAMING_SNAKE_CASE ) == "->".join(str(SCREAMING_SNAKE_CASE ) for i in range(-8 , 1 ) )
def a__ ( ):
'''simple docstring'''
lowerCAmelCase : List[str] = [
-9,
1_0_0,
Node(7_7_3_4_5_1_1_2 ),
"dlrow olleH",
7,
5_5_5_5,
0,
-192.55_555,
"Hello, world!",
77.9,
Node(1_0 ),
None,
None,
12.20,
]
lowerCAmelCase : List[str] = LinkedList()
for i in test_input:
linked_list.insert_tail(SCREAMING_SNAKE_CASE )
# Check if it's empty or not
assert linked_list.is_empty() is False
assert (
str(SCREAMING_SNAKE_CASE ) == "-9->100->Node(77345112)->dlrow olleH->7->5555->0->"
"-192.55555->Hello, world!->77.9->Node(10)->None->None->12.2"
)
# Delete the head
lowerCAmelCase : str = linked_list.delete_head()
assert result == -9
assert (
str(SCREAMING_SNAKE_CASE ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
"Hello, world!->77.9->Node(10)->None->None->12.2"
)
# Delete the tail
lowerCAmelCase : Union[str, Any] = linked_list.delete_tail()
assert result == 12.2
assert (
str(SCREAMING_SNAKE_CASE ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
"Hello, world!->77.9->Node(10)->None->None"
)
# Delete a node in specific location in linked list
lowerCAmelCase : List[str] = linked_list.delete_nth(1_0 )
assert result is None
assert (
str(SCREAMING_SNAKE_CASE ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
"Hello, world!->77.9->Node(10)->None"
)
# Add a Node instance to its head
linked_list.insert_head(Node("Hello again, world!" ) )
assert (
str(SCREAMING_SNAKE_CASE )
== "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
"7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None"
)
# Add None to its tail
linked_list.insert_tail(SCREAMING_SNAKE_CASE )
assert (
str(SCREAMING_SNAKE_CASE )
== "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
"7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None->None"
)
# Reverse the linked list
linked_list.reverse()
assert (
str(SCREAMING_SNAKE_CASE )
== "None->None->Node(10)->77.9->Hello, world!->-192.55555->0->5555->"
"7->dlrow olleH->Node(77345112)->100->Node(Hello again, world!)"
)
def a__ ( ):
'''simple docstring'''
from doctest import testmod
testmod()
lowerCAmelCase : Optional[Any] = LinkedList()
linked_list.insert_head(input("Inserting 1st at head " ).strip() )
linked_list.insert_head(input("Inserting 2nd at head " ).strip() )
print("\nPrint list:" )
linked_list.print_list()
linked_list.insert_tail(input("\nInserting 1st at tail " ).strip() )
linked_list.insert_tail(input("Inserting 2nd at tail " ).strip() )
print("\nPrint list:" )
linked_list.print_list()
print("\nDelete head" )
linked_list.delete_head()
print("Delete tail" )
linked_list.delete_tail()
print("\nPrint list:" )
linked_list.print_list()
print("\nReverse linked list" )
linked_list.reverse()
print("\nPrint list:" )
linked_list.print_list()
print("\nString representation of linked list:" )
print(SCREAMING_SNAKE_CASE )
print("\nReading/changing Node data using indexing:" )
print(f"""Element at Position 1: {linked_list[1]}""" )
lowerCAmelCase : Any = input("Enter New Value: " ).strip()
print("New list:" )
print(SCREAMING_SNAKE_CASE )
print(f"""length of linked_list is : {len(SCREAMING_SNAKE_CASE )}""" )
if __name__ == "__main__":
main()
| 681 | 1 |
"""simple docstring"""
def a__ ( SCREAMING_SNAKE_CASE : int = 1_0 , SCREAMING_SNAKE_CASE : int = 2_2 ):
'''simple docstring'''
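    # Project Euler problem 63: count the positive integers that are both an
    # n-digit number and an n-th power, via len(str(base ** power)) == power.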
lowerCAmelCase : Dict = range(1 , SCREAMING_SNAKE_CASE )
lowerCAmelCase : List[str] = range(1 , SCREAMING_SNAKE_CASE )
return sum(
1 for power in powers for base in bases if len(str(base**power ) ) == power )
if __name__ == "__main__":
print(F"{solution(10, 22) = }")
| 681 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
lowerCAmelCase__ = {
'''configuration_efficientformer''': [
'''EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''EfficientFormerConfig''',
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = ['''EfficientFormerImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = [
'''EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''EfficientFormerForImageClassification''',
'''EfficientFormerForImageClassificationWithTeacher''',
'''EfficientFormerModel''',
'''EfficientFormerPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = [
'''TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFEfficientFormerForImageClassification''',
'''TFEfficientFormerForImageClassificationWithTeacher''',
'''TFEfficientFormerModel''',
'''TFEfficientFormerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_efficientformer import EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, EfficientFormerConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_efficientformer import EfficientFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_efficientformer import (
EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
EfficientFormerForImageClassification,
EfficientFormerForImageClassificationWithTeacher,
EfficientFormerModel,
EfficientFormerPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_efficientformer import (
TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerModel,
TFEfficientFormerPreTrainedModel,
)
else:
import sys
lowerCAmelCase__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 681 | 1 |
"""simple docstring"""
from functools import reduce
lowerCAmelCase__ = (
'''73167176531330624919225119674426574742355349194934'''
'''96983520312774506326239578318016984801869478851843'''
'''85861560789112949495459501737958331952853208805511'''
'''12540698747158523863050715693290963295227443043557'''
'''66896648950445244523161731856403098711121722383113'''
'''62229893423380308135336276614282806444486645238749'''
'''30358907296290491560440772390713810515859307960866'''
'''70172427121883998797908792274921901699720888093776'''
'''65727333001053367881220235421809751254540594752243'''
'''52584907711670556013604839586446706324415722155397'''
'''53697817977846174064955149290862569321978468622482'''
'''83972241375657056057490261407972968652414535100474'''
'''82166370484403199890008895243450658541227588666881'''
'''16427171479924442928230863465674813919123162824586'''
'''17866458359124566529476545682848912883142607690042'''
'''24219022671055626321111109370544217506941658960408'''
'''07198403850962455444362981230987879927244284909188'''
'''84580156166097919133875499200524063689912560717606'''
'''05886116467109405077541002256983155200055935729725'''
'''71636269561882670428252483600823257530420752963450'''
)
def a__ ( SCREAMING_SNAKE_CASE : str = N ):
'''simple docstring'''
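    # Project Euler problem 8: find the greatest product of 13 adjacent digits
    # in the 1000-digit constant defined above.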
return max(
# mypy cannot properly interpret reduce
int(reduce(lambda SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : str(int(SCREAMING_SNAKE_CASE ) * int(SCREAMING_SNAKE_CASE ) ) , n[i : i + 1_3] ) )
for i in range(len(SCREAMING_SNAKE_CASE ) - 1_2 ) )
if __name__ == "__main__":
print(F"{solution() = }")
| 681 |
"""simple docstring"""
import argparse
import csv
import logging
import os
import random
import numpy as np
import torch
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler, TensorDataset
from tqdm import tqdm, trange
from transformers import (
CONFIG_NAME,
WEIGHTS_NAME,
AdamW,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTTokenizer,
get_linear_schedule_with_warmup,
)
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''', datefmt='''%m/%d/%Y %H:%M:%S''', level=logging.INFO
)
lowerCAmelCase__ = logging.getLogger(__name__)
def a__ ( SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : Optional[int] ):
'''simple docstring'''
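    # Argmax over the multiple-choice logits, then count exact matches
    # against the gold labels.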
lowerCAmelCase : Optional[Any] = np.argmax(SCREAMING_SNAKE_CASE , axis=1 )
return np.sum(outputs == labels )
def a__ ( SCREAMING_SNAKE_CASE : int ):
'''simple docstring'''
with open(SCREAMING_SNAKE_CASE , encoding="utf_8" ) as f:
lowerCAmelCase : Tuple = csv.reader(SCREAMING_SNAKE_CASE )
lowerCAmelCase : Tuple = []
next(SCREAMING_SNAKE_CASE ) # skip the first line
for line in tqdm(SCREAMING_SNAKE_CASE ):
output.append((" ".join(line[1:5] ), line[5], line[6], int(line[-1] ) - 1) )
return output
def a__ ( SCREAMING_SNAKE_CASE : Optional[int] , SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : List[str] , SCREAMING_SNAKE_CASE : str ):
'''simple docstring'''
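    # Build the (input_ids, mc_token_ids, lm_labels, mc_labels) tensors expected by
    # OpenAIGPTDoubleHeadsModel: each story is paired with both candidate endings,
    # framed as [start] story [delimiter] continuation [classify].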
lowerCAmelCase : List[Any] = []
for dataset in encoded_datasets:
lowerCAmelCase : Dict = len(SCREAMING_SNAKE_CASE )
lowerCAmelCase : str = np.zeros((n_batch, 2, input_len) , dtype=np.intaa )
lowerCAmelCase : Tuple = np.zeros((n_batch, 2) , dtype=np.intaa )
lowerCAmelCase : int = np.full((n_batch, 2, input_len) , fill_value=-1_0_0 , dtype=np.intaa )
lowerCAmelCase : List[Any] = np.zeros((n_batch,) , dtype=np.intaa )
for (
i,
(story, conta, conta, mc_label),
) in enumerate(SCREAMING_SNAKE_CASE ):
lowerCAmelCase : int = [start_token] + story[:cap_length] + [delimiter_token] + conta[:cap_length] + [clf_token]
lowerCAmelCase : Union[str, Any] = [start_token] + story[:cap_length] + [delimiter_token] + conta[:cap_length] + [clf_token]
lowerCAmelCase : Tuple = with_conta
lowerCAmelCase : Any = with_conta
lowerCAmelCase : Optional[Any] = len(SCREAMING_SNAKE_CASE ) - 1
lowerCAmelCase : Dict = len(SCREAMING_SNAKE_CASE ) - 1
lowerCAmelCase : Optional[Any] = with_conta
lowerCAmelCase : List[Any] = with_conta
lowerCAmelCase : str = mc_label
lowerCAmelCase : Dict = (input_ids, mc_token_ids, lm_labels, mc_labels)
tensor_datasets.append(tuple(torch.tensor(SCREAMING_SNAKE_CASE ) for t in all_inputs ) )
return tensor_datasets
def a__ ( ):
'''simple docstring'''
lowerCAmelCase : Optional[int] = argparse.ArgumentParser()
parser.add_argument("--model_name" , type=SCREAMING_SNAKE_CASE , default="openai-gpt" , help="pretrained model name" )
parser.add_argument("--do_train" , action="store_true" , help="Whether to run training." )
parser.add_argument("--do_eval" , action="store_true" , help="Whether to run eval on the dev set." )
parser.add_argument(
"--output_dir" , default=SCREAMING_SNAKE_CASE , type=SCREAMING_SNAKE_CASE , required=SCREAMING_SNAKE_CASE , help="The output directory where the model predictions and checkpoints will be written." , )
parser.add_argument("--train_dataset" , type=SCREAMING_SNAKE_CASE , default="" )
parser.add_argument("--eval_dataset" , type=SCREAMING_SNAKE_CASE , default="" )
parser.add_argument("--seed" , type=SCREAMING_SNAKE_CASE , default=4_2 )
parser.add_argument("--num_train_epochs" , type=SCREAMING_SNAKE_CASE , default=3 )
parser.add_argument("--train_batch_size" , type=SCREAMING_SNAKE_CASE , default=8 )
parser.add_argument("--eval_batch_size" , type=SCREAMING_SNAKE_CASE , default=1_6 )
parser.add_argument("--adam_epsilon" , default=1E-8 , type=SCREAMING_SNAKE_CASE , help="Epsilon for Adam optimizer." )
parser.add_argument("--max_grad_norm" , type=SCREAMING_SNAKE_CASE , default=1 )
parser.add_argument(
"--max_steps" , default=-1 , type=SCREAMING_SNAKE_CASE , help=(
"If > 0: set total number of training steps to perform. Override num_train_epochs."
) , )
parser.add_argument(
"--gradient_accumulation_steps" , type=SCREAMING_SNAKE_CASE , default=1 , help="Number of updates steps to accumulate before performing a backward/update pass." , )
parser.add_argument("--learning_rate" , type=SCREAMING_SNAKE_CASE , default=6.2_5E-5 )
parser.add_argument("--warmup_steps" , default=0 , type=SCREAMING_SNAKE_CASE , help="Linear warmup over warmup_steps." )
parser.add_argument("--lr_schedule" , type=SCREAMING_SNAKE_CASE , default="warmup_linear" )
parser.add_argument("--weight_decay" , type=SCREAMING_SNAKE_CASE , default=0.01 )
parser.add_argument("--lm_coef" , type=SCREAMING_SNAKE_CASE , default=0.9 )
parser.add_argument("--n_valid" , type=SCREAMING_SNAKE_CASE , default=3_7_4 )
parser.add_argument("--server_ip" , type=SCREAMING_SNAKE_CASE , default="" , help="Can be used for distant debugging." )
parser.add_argument("--server_port" , type=SCREAMING_SNAKE_CASE , default="" , help="Can be used for distant debugging." )
lowerCAmelCase : Tuple = parser.parse_args()
print(SCREAMING_SNAKE_CASE )
if args.server_ip and args.server_port:
# Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
import ptvsd
print("Waiting for debugger attach" )
ptvsd.enable_attach(address=(args.server_ip, args.server_port) , redirect_output=SCREAMING_SNAKE_CASE )
ptvsd.wait_for_attach()
random.seed(args.seed )
np.random.seed(args.seed )
torch.manual_seed(args.seed )
torch.cuda.manual_seed_all(args.seed )
lowerCAmelCase : Optional[int] = torch.device("cuda" if torch.cuda.is_available() else "cpu" )
lowerCAmelCase : Optional[int] = torch.cuda.device_count()
logger.info("device: {}, n_gpu {}".format(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) )
if not args.do_train and not args.do_eval:
raise ValueError("At least one of `do_train` or `do_eval` must be True." )
if not os.path.exists(args.output_dir ):
os.makedirs(args.output_dir )
# Load tokenizer and model
# This loading functions also add new tokens and embeddings called `special tokens`
# These new embeddings will be fine-tuned on the RocStories dataset
lowerCAmelCase : str = ["_start_", "_delimiter_", "_classify_"]
lowerCAmelCase : Dict = OpenAIGPTTokenizer.from_pretrained(args.model_name )
tokenizer.add_tokens(SCREAMING_SNAKE_CASE )
lowerCAmelCase : Dict = tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE )
lowerCAmelCase : Union[str, Any] = OpenAIGPTDoubleHeadsModel.from_pretrained(args.model_name )
model.resize_token_embeddings(len(SCREAMING_SNAKE_CASE ) )
model.to(SCREAMING_SNAKE_CASE )
# Load and encode the datasets
def tokenize_and_encode(SCREAMING_SNAKE_CASE : Optional[Any] ):
if isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
return tokenizer.convert_tokens_to_ids(tokenizer.tokenize(SCREAMING_SNAKE_CASE ) )
elif isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
return obj
return [tokenize_and_encode(SCREAMING_SNAKE_CASE ) for o in obj]
logger.info("Encoding dataset..." )
lowerCAmelCase : Optional[Any] = load_rocstories_dataset(args.train_dataset )
lowerCAmelCase : int = load_rocstories_dataset(args.eval_dataset )
lowerCAmelCase : Tuple = (train_dataset, eval_dataset)
lowerCAmelCase : Dict = tokenize_and_encode(SCREAMING_SNAKE_CASE )
# Compute the max input length for the Transformer
lowerCAmelCase : Any = model.config.n_positions // 2 - 2
lowerCAmelCase : int = max(
len(story[:max_length] ) + max(len(conta[:max_length] ) , len(conta[:max_length] ) ) + 3
for dataset in encoded_datasets
for story, conta, conta, _ in dataset )
lowerCAmelCase : Union[str, Any] = min(SCREAMING_SNAKE_CASE , model.config.n_positions ) # Max size of input for the pre-trained model
# Prepare inputs tensors and dataloaders
lowerCAmelCase : Any = pre_process_datasets(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , *SCREAMING_SNAKE_CASE )
lowerCAmelCase , lowerCAmelCase : Tuple = tensor_datasets[0], tensor_datasets[1]
lowerCAmelCase : List[str] = TensorDataset(*SCREAMING_SNAKE_CASE )
lowerCAmelCase : Union[str, Any] = RandomSampler(SCREAMING_SNAKE_CASE )
lowerCAmelCase : Tuple = DataLoader(SCREAMING_SNAKE_CASE , sampler=SCREAMING_SNAKE_CASE , batch_size=args.train_batch_size )
lowerCAmelCase : int = TensorDataset(*SCREAMING_SNAKE_CASE )
lowerCAmelCase : Dict = SequentialSampler(SCREAMING_SNAKE_CASE )
lowerCAmelCase : Tuple = DataLoader(SCREAMING_SNAKE_CASE , sampler=SCREAMING_SNAKE_CASE , batch_size=args.eval_batch_size )
# Prepare optimizer
if args.do_train:
if args.max_steps > 0:
lowerCAmelCase : int = args.max_steps
lowerCAmelCase : str = args.max_steps // (len(SCREAMING_SNAKE_CASE ) // args.gradient_accumulation_steps) + 1
else:
lowerCAmelCase : Union[str, Any] = len(SCREAMING_SNAKE_CASE ) // args.gradient_accumulation_steps * args.num_train_epochs
lowerCAmelCase : Dict = list(model.named_parameters() )
lowerCAmelCase : str = ["bias", "LayerNorm.bias", "LayerNorm.weight"]
lowerCAmelCase : Tuple = [
{
"params": [p for n, p in param_optimizer if not any(nd in n for nd in no_decay )],
"weight_decay": args.weight_decay,
},
{"params": [p for n, p in param_optimizer if any(nd in n for nd in no_decay )], "weight_decay": 0.0},
]
lowerCAmelCase : Tuple = AdamW(SCREAMING_SNAKE_CASE , lr=args.learning_rate , eps=args.adam_epsilon )
lowerCAmelCase : str = get_linear_schedule_with_warmup(
SCREAMING_SNAKE_CASE , num_warmup_steps=args.warmup_steps , num_training_steps=SCREAMING_SNAKE_CASE )
if args.do_train:
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase : Dict = 0, 0, None
model.train()
for _ in trange(int(args.num_train_epochs ) , desc="Epoch" ):
lowerCAmelCase : Optional[int] = 0
lowerCAmelCase : Union[str, Any] = 0
lowerCAmelCase : Tuple = tqdm(SCREAMING_SNAKE_CASE , desc="Training" )
for step, batch in enumerate(SCREAMING_SNAKE_CASE ):
lowerCAmelCase : Tuple = tuple(t.to(SCREAMING_SNAKE_CASE ) for t in batch )
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase : int = batch
lowerCAmelCase : Optional[int] = model(SCREAMING_SNAKE_CASE , mc_token_ids=SCREAMING_SNAKE_CASE , lm_labels=SCREAMING_SNAKE_CASE , mc_labels=SCREAMING_SNAKE_CASE )
lowerCAmelCase : int = args.lm_coef * losses[0] + losses[1]
loss.backward()
optimizer.step()
scheduler.step()
optimizer.zero_grad()
tr_loss += loss.item()
lowerCAmelCase : Optional[Any] = (
loss.item() if exp_average_loss is None else 0.7 * exp_average_loss + 0.3 * loss.item()
)
nb_tr_steps += 1
lowerCAmelCase : int = "Training loss: {:.2e} lr: {:.2e}".format(SCREAMING_SNAKE_CASE , scheduler.get_lr()[0] )
# Save a trained model
if args.do_train:
# Save a trained model, configuration and tokenizer
lowerCAmelCase : Optional[int] = model.module if hasattr(SCREAMING_SNAKE_CASE , "module" ) else model # Only save the model itself
# If we save using the predefined names, we can load using `from_pretrained`
lowerCAmelCase : Any = os.path.join(args.output_dir , SCREAMING_SNAKE_CASE )
lowerCAmelCase : Dict = os.path.join(args.output_dir , SCREAMING_SNAKE_CASE )
torch.save(model_to_save.state_dict() , SCREAMING_SNAKE_CASE )
model_to_save.config.to_json_file(SCREAMING_SNAKE_CASE )
tokenizer.save_vocabulary(args.output_dir )
# Load a trained model and vocabulary that you have fine-tuned
lowerCAmelCase : List[str] = OpenAIGPTDoubleHeadsModel.from_pretrained(args.output_dir )
lowerCAmelCase : Dict = OpenAIGPTTokenizer.from_pretrained(args.output_dir )
model.to(SCREAMING_SNAKE_CASE )
if args.do_eval:
model.eval()
lowerCAmelCase , lowerCAmelCase : Optional[int] = 0, 0
lowerCAmelCase , lowerCAmelCase : Any = 0, 0
for batch in tqdm(SCREAMING_SNAKE_CASE , desc="Evaluating" ):
lowerCAmelCase : List[Any] = tuple(t.to(SCREAMING_SNAKE_CASE ) for t in batch )
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase : Tuple = batch
with torch.no_grad():
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase : Optional[Any] = model(
SCREAMING_SNAKE_CASE , mc_token_ids=SCREAMING_SNAKE_CASE , lm_labels=SCREAMING_SNAKE_CASE , mc_labels=SCREAMING_SNAKE_CASE )
lowerCAmelCase : List[str] = mc_logits.detach().cpu().numpy()
lowerCAmelCase : List[str] = mc_labels.to("cpu" ).numpy()
lowerCAmelCase : Any = accuracy(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
eval_loss += mc_loss.mean().item()
eval_accuracy += tmp_eval_accuracy
nb_eval_examples += input_ids.size(0 )
nb_eval_steps += 1
lowerCAmelCase : List[Any] = eval_loss / nb_eval_steps
lowerCAmelCase : List[Any] = eval_accuracy / nb_eval_examples
lowerCAmelCase : Tuple = tr_loss / nb_tr_steps if args.do_train else None
lowerCAmelCase : Any = {"eval_loss": eval_loss, "eval_accuracy": eval_accuracy, "train_loss": train_loss}
lowerCAmelCase : List[str] = os.path.join(args.output_dir , "eval_results.txt" )
with open(SCREAMING_SNAKE_CASE , "w" ) as writer:
logger.info("***** Eval results *****" )
for key in sorted(result.keys() ):
logger.info(" %s = %s" , SCREAMING_SNAKE_CASE , str(result[key] ) )
writer.write("%s = %s\n" % (key, str(result[key] )) )
if __name__ == "__main__":
main()
| 681 | 1 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {
'''transfo-xl-wt103''': '''https://huggingface.co/transfo-xl-wt103/resolve/main/config.json''',
}
class SCREAMING_SNAKE_CASE__ ( lowercase ):
"""simple docstring"""
a : List[str] ="transfo-xl"
a : str =["mems"]
a : Optional[Any] ={
"n_token": "vocab_size",
"hidden_size": "d_model",
"num_attention_heads": "n_head",
"num_hidden_layers": "n_layer",
}
def __init__( self , snake_case__=267_735 , snake_case__=[20_000, 40_000, 200_000] , snake_case__=1_024 , snake_case__=1_024 , snake_case__=16 , snake_case__=64 , snake_case__=4_096 , snake_case__=4 , snake_case__=False , snake_case__=18 , snake_case__=1_600 , snake_case__=1_000 , snake_case__=True , snake_case__=True , snake_case__=0 , snake_case__=-1 , snake_case__=True , snake_case__=0.1 , snake_case__=0.0 , snake_case__=True , snake_case__="normal" , snake_case__=0.01 , snake_case__=0.01 , snake_case__=0.02 , snake_case__=1e-5 , snake_case__=0 , **snake_case__ , ):
"""simple docstring"""
lowerCAmelCase : Tuple = vocab_size
lowerCAmelCase : Optional[Any] = []
self.cutoffs.extend(snake_case__ )
if proj_share_all_but_first:
lowerCAmelCase : List[Any] = [False] + [True] * len(self.cutoffs )
else:
lowerCAmelCase : List[Any] = [False] + [False] * len(self.cutoffs )
lowerCAmelCase : Optional[int] = d_model
lowerCAmelCase : Dict = d_embed
lowerCAmelCase : Optional[int] = d_head
lowerCAmelCase : str = d_inner
lowerCAmelCase : Union[str, Any] = div_val
lowerCAmelCase : int = pre_lnorm
lowerCAmelCase : str = n_layer
lowerCAmelCase : Optional[int] = n_head
lowerCAmelCase : Tuple = mem_len
lowerCAmelCase : str = same_length
lowerCAmelCase : int = attn_type
lowerCAmelCase : Any = clamp_len
lowerCAmelCase : List[Any] = sample_softmax
lowerCAmelCase : Any = adaptive
lowerCAmelCase : Any = dropout
lowerCAmelCase : Union[str, Any] = dropatt
lowerCAmelCase : Any = untie_r
lowerCAmelCase : str = init
lowerCAmelCase : Tuple = init_range
lowerCAmelCase : int = proj_init_std
lowerCAmelCase : Tuple = init_std
lowerCAmelCase : Dict = layer_norm_epsilon
super().__init__(eos_token_id=snake_case__ , **snake_case__ )
@property
def lowercase__ ( self ):
"""simple docstring"""
logger.info(f"""The model {self.model_type} is one of the few models that has no sequence length limit.""" )
return -1
@max_position_embeddings.setter
def lowercase__ ( self , snake_case__ ):
"""simple docstring"""
raise NotImplementedError(
f"""The model {self.model_type} is one of the few models that has no sequence length limit.""" )
| 681 |
"""simple docstring"""
from typing import List, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {
'''huggingface/informer-tourism-monthly''': (
'''https://huggingface.co/huggingface/informer-tourism-monthly/resolve/main/config.json'''
),
# See all Informer models at https://huggingface.co/models?filter=informer
}
class SCREAMING_SNAKE_CASE__ ( lowercase ):
"""simple docstring"""
a : Optional[Any] ="informer"
a : int ={
"hidden_size": "d_model",
"num_attention_heads": "encoder_attention_heads",
"num_hidden_layers": "encoder_layers",
}
def __init__( self , snake_case__ = None , snake_case__ = None , snake_case__ = "student_t" , snake_case__ = "nll" , snake_case__ = 1 , snake_case__ = None , snake_case__ = "mean" , snake_case__ = 0 , snake_case__ = 0 , snake_case__ = 0 , snake_case__ = 0 , snake_case__ = None , snake_case__ = None , snake_case__ = 64 , snake_case__ = 32 , snake_case__ = 32 , snake_case__ = 2 , snake_case__ = 2 , snake_case__ = 2 , snake_case__ = 2 , snake_case__ = True , snake_case__ = "gelu" , snake_case__ = 0.05 , snake_case__ = 0.1 , snake_case__ = 0.1 , snake_case__ = 0.1 , snake_case__ = 0.1 , snake_case__ = 100 , snake_case__ = 0.02 , snake_case__=True , snake_case__ = "prob" , snake_case__ = 5 , snake_case__ = True , **snake_case__ , ):
"""simple docstring"""
lowerCAmelCase : Union[str, Any] = prediction_length
lowerCAmelCase : Union[str, Any] = context_length or prediction_length
lowerCAmelCase : List[Any] = distribution_output
lowerCAmelCase : Optional[int] = loss
lowerCAmelCase : Optional[int] = input_size
lowerCAmelCase : str = num_time_features
lowerCAmelCase : Any = lags_sequence if lags_sequence is not None else [1, 2, 3, 4, 5, 6, 7]
lowerCAmelCase : Dict = scaling
lowerCAmelCase : List[str] = num_dynamic_real_features
lowerCAmelCase : Dict = num_static_real_features
lowerCAmelCase : Dict = num_static_categorical_features
# set cardinality
if cardinality and num_static_categorical_features > 0:
if len(snake_case__ ) != num_static_categorical_features:
raise ValueError(
"The cardinality should be a list of the same length as `num_static_categorical_features`" )
lowerCAmelCase : List[str] = cardinality
else:
lowerCAmelCase : Optional[Any] = [0]
# set embedding_dimension
if embedding_dimension and num_static_categorical_features > 0:
if len(snake_case__ ) != num_static_categorical_features:
raise ValueError(
"The embedding dimension should be a list of the same length as `num_static_categorical_features`" )
lowerCAmelCase : List[Any] = embedding_dimension
else:
lowerCAmelCase : Dict = [min(50 , (cat + 1) // 2 ) for cat in self.cardinality]
lowerCAmelCase : List[Any] = num_parallel_samples
# Transformer architecture configuration
lowerCAmelCase : Any = input_size * len(self.lags_sequence ) + self._number_of_features
lowerCAmelCase : str = d_model
lowerCAmelCase : List[str] = encoder_attention_heads
lowerCAmelCase : int = decoder_attention_heads
lowerCAmelCase : Optional[Any] = encoder_ffn_dim
lowerCAmelCase : Dict = decoder_ffn_dim
lowerCAmelCase : int = encoder_layers
lowerCAmelCase : Union[str, Any] = decoder_layers
lowerCAmelCase : Tuple = dropout
lowerCAmelCase : List[Any] = attention_dropout
lowerCAmelCase : int = activation_dropout
lowerCAmelCase : Union[str, Any] = encoder_layerdrop
lowerCAmelCase : int = decoder_layerdrop
lowerCAmelCase : Optional[int] = activation_function
lowerCAmelCase : int = init_std
lowerCAmelCase : Optional[Any] = use_cache
# Informer
lowerCAmelCase : Dict = attention_type
lowerCAmelCase : Any = sampling_factor
lowerCAmelCase : Optional[int] = distil
super().__init__(is_encoder_decoder=snake_case__ , **snake_case__ )
@property
def lowercase__ ( self ):
"""simple docstring"""
return (
sum(self.embedding_dimension )
+ self.num_dynamic_real_features
+ self.num_time_features
+ self.num_static_real_features
+ self.input_size * 2 # the log1p(abs(loc)) and log(scale) features
)
| 681 | 1 |
"""simple docstring"""
import argparse
import hashlib # hashlib is only used inside the Test class
import struct
class SCREAMING_SNAKE_CASE__ :
"""simple docstring"""
def __init__( self , snake_case__ ):
"""simple docstring"""
lowerCAmelCase : Union[str, Any] = data
lowerCAmelCase : int = [0x67_452_301, 0xef_cda_b89, 0x98_bad_cfe, 0x10_325_476, 0xc3_d2e_1f0]
@staticmethod
def lowercase__ ( snake_case__ , snake_case__ ):
"""simple docstring"""
return ((n << b) | (n >> (32 - b))) & 0xff_fff_fff
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : List[str] = B"\x80" + B"\x00" * (63 - (len(self.data ) + 8) % 64)
lowerCAmelCase : Dict = self.data + padding + struct.pack(">Q" , 8 * len(self.data ) )
return padded_data
def lowercase__ ( self ):
"""simple docstring"""
return [
self.padded_data[i : i + 64] for i in range(0 , len(self.padded_data ) , 64 )
]
def lowercase__ ( self , snake_case__ ):
"""simple docstring"""
lowerCAmelCase : Dict = list(struct.unpack(">16L" , snake_case__ ) ) + [0] * 64
for i in range(16 , 80 ):
lowerCAmelCase : List[str] = self.rotate((w[i - 3] ^ w[i - 8] ^ w[i - 14] ^ w[i - 16]) , 1 )
return w
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : Tuple = self.padding()
lowerCAmelCase : Union[str, Any] = self.split_blocks()
for block in self.blocks:
lowerCAmelCase : str = self.expand_block(snake_case__ )
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase : Union[str, Any] = self.h
for i in range(0 , 80 ):
if 0 <= i < 20:
lowerCAmelCase : List[Any] = (b & c) | ((~b) & d)
lowerCAmelCase : List[str] = 0x5a_827_999
elif 20 <= i < 40:
lowerCAmelCase : List[Any] = b ^ c ^ d
lowerCAmelCase : Any = 0x6e_d9e_ba1
elif 40 <= i < 60:
lowerCAmelCase : Any = (b & c) | (b & d) | (c & d)
lowerCAmelCase : Tuple = 0x8f_1bb_cdc
elif 60 <= i < 80:
lowerCAmelCase : Dict = b ^ c ^ d
lowerCAmelCase : Any = 0xca_62c_1d6
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase : str = (
self.rotate(snake_case__ , 5 ) + f + e + k + expanded_block[i] & 0xff_fff_fff,
a,
self.rotate(snake_case__ , 30 ),
c,
d,
)
lowerCAmelCase : Tuple = (
self.h[0] + a & 0xff_fff_fff,
self.h[1] + b & 0xff_fff_fff,
self.h[2] + c & 0xff_fff_fff,
self.h[3] + d & 0xff_fff_fff,
self.h[4] + e & 0xff_fff_fff,
)
return ("{:08x}" * 5).format(*self.h )
def a__ ( ):
'''simple docstring'''
lowerCAmelCase : str = B"Test String"
assert SHAaHash(SCREAMING_SNAKE_CASE ).final_hash() == hashlib.shaa(SCREAMING_SNAKE_CASE ).hexdigest() # noqa: S324
def a__ ( ):
'''simple docstring'''
lowerCAmelCase : Optional[int] = argparse.ArgumentParser(description="Process some strings or files" )
parser.add_argument(
"--string" , dest="input_string" , default="Hello World!! Welcome to Cryptography" , help="Hash the string" , )
parser.add_argument("--file" , dest="input_file" , help="Hash contents of a file" )
lowerCAmelCase : int = parser.parse_args()
lowerCAmelCase : List[Any] = args.input_string
# In any case hash input should be a bytestring
if args.input_file:
with open(args.input_file , "rb" ) as f:
lowerCAmelCase : str = f.read()
else:
lowerCAmelCase : List[str] = bytes(SCREAMING_SNAKE_CASE , "utf-8" )
print(SHAaHash(SCREAMING_SNAKE_CASE ).final_hash() )
if __name__ == "__main__":
main()
import doctest
doctest.testmod()
| 681 |
"""simple docstring"""
def a__ ( SCREAMING_SNAKE_CASE : int ):
'''simple docstring'''
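    # Reverse the decimal digits of the input and compare with the original;
    # negative numbers are never considered palindromes here.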
if num < 0:
return False
lowerCAmelCase : int = num
lowerCAmelCase : int = 0
while num > 0:
lowerCAmelCase : Dict = rev_num * 1_0 + (num % 1_0)
num //= 1_0
return num_copy == rev_num
if __name__ == "__main__":
import doctest
doctest.testmod()
| 681 | 1 |
"""simple docstring"""
import collections
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = '''▁'''
lowerCAmelCase__ = {'''vocab_file''': '''prophetnet.tokenizer'''}
lowerCAmelCase__ = {
'''vocab_file''': {
'''microsoft/xprophetnet-large-wiki100-cased''': (
'''https://huggingface.co/microsoft/xprophetnet-large-wiki100-cased/resolve/main/prophetnet.tokenizer'''
),
}
}
lowerCAmelCase__ = {
'''microsoft/xprophetnet-large-wiki100-cased''': {'''do_lower_case''': False},
}
lowerCAmelCase__ = {
'''microsoft/xprophetnet-large-wiki100-cased''': 512,
}
def a__ ( SCREAMING_SNAKE_CASE : Optional[Any] ):
'''simple docstring'''
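    # Read a newline-separated vocabulary file into an ordered token -> index mapping.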
lowerCAmelCase : int = collections.OrderedDict()
with open(SCREAMING_SNAKE_CASE , "r" , encoding="utf-8" ) as reader:
lowerCAmelCase : List[Any] = reader.readlines()
for index, token in enumerate(SCREAMING_SNAKE_CASE ):
lowerCAmelCase : Optional[int] = token.rstrip("\n" )
lowerCAmelCase : List[Any] = index
return vocab
class SCREAMING_SNAKE_CASE__ ( lowercase ):
"""simple docstring"""
a : int =VOCAB_FILES_NAMES
a : Union[str, Any] =PRETRAINED_VOCAB_FILES_MAP
a : Optional[int] =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a : Optional[int] =["input_ids", "attention_mask"]
def __init__( self , snake_case__ , snake_case__="[SEP]" , snake_case__="[SEP]" , snake_case__="[SEP]" , snake_case__="[UNK]" , snake_case__="[PAD]" , snake_case__="[CLS]" , snake_case__="[MASK]" , snake_case__ = None , **snake_case__ , ):
"""simple docstring"""
lowerCAmelCase : List[str] = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=snake_case__ , eos_token=snake_case__ , sep_token=snake_case__ , unk_token=snake_case__ , pad_token=snake_case__ , cls_token=snake_case__ , mask_token=snake_case__ , sp_model_kwargs=self.sp_model_kwargs , **snake_case__ , )
try:
import sentencepiece as spm
except ImportError:
logger.warning(
"You need to install SentencePiece to use XLMRobertaTokenizer: https://github.com/google/sentencepiece"
" pip install sentencepiece" )
raise
lowerCAmelCase : Union[str, Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(snake_case__ ) )
lowerCAmelCase : Optional[Any] = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
# put special tokens and [unused] tokens into the vocab
lowerCAmelCase : List[str] = {"[PAD]": 0, "[CLS]": 1, "[SEP]": 2, "[UNK]": 3, "[MASK]": 4}
for i in range(10 ):
lowerCAmelCase : Optional[int] = f"""[unused{i}]"""
lowerCAmelCase : Optional[int] = 5 + i
# The first "real" token "," has position 15 in the embedding vocab and position 3 in the spm vocab
lowerCAmelCase : int = 12
lowerCAmelCase : int = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
for k in self.fairseq_tokens_to_ids.keys():
self.unique_no_split_tokens.append(snake_case__ )
def __getstate__( self ):
"""simple docstring"""
lowerCAmelCase : Optional[Any] = self.__dict__.copy()
lowerCAmelCase : List[Any] = None
return state
def __setstate__( self , snake_case__ ):
"""simple docstring"""
lowerCAmelCase : Optional[int] = d
try:
import sentencepiece as spm
except ImportError:
logger.warning(
"You need to install SentencePiece to use XLMRobertaTokenizer: https://github.com/google/sentencepiece"
" pip install sentencepiece" )
raise
# for backward compatibility
if not hasattr(self , "sp_model_kwargs" ):
lowerCAmelCase : Union[str, Any] = {}
lowerCAmelCase : Optional[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def lowercase__ ( self , snake_case__ , snake_case__ = None , snake_case__ = False ):
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=snake_case__ , token_ids_a=snake_case__ , already_has_special_tokens=snake_case__ )
if token_ids_a is None:
return ([0] * len(snake_case__ )) + [1]
return ([0] * len(snake_case__ )) + [1] + ([0] * len(snake_case__ )) + [1]
def lowercase__ ( self , snake_case__ , snake_case__ = None ):
"""simple docstring"""
lowerCAmelCase : Optional[int] = [self.sep_token_id]
if token_ids_a is None:
return len(token_ids_a + sep ) * [0]
return len(token_ids_a + sep + sep + token_ids_a + sep ) * [0]
@property
def lowercase__ ( self ):
"""simple docstring"""
return len(self.sp_model ) + self.fairseq_offset
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : Any = {self.convert_ids_to_tokens(snake_case__ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def lowercase__ ( self , snake_case__ ):
"""simple docstring"""
return self.sp_model.encode(snake_case__ , out_type=snake_case__ )
def lowercase__ ( self , snake_case__ ):
"""simple docstring"""
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
lowerCAmelCase : List[str] = self.sp_model.PieceToId(snake_case__ )
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def lowercase__ ( self , snake_case__ ):
"""simple docstring"""
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
def lowercase__ ( self , snake_case__ ):
"""simple docstring"""
lowerCAmelCase : int = "".join(snake_case__ ).replace(snake_case__ , " " ).strip()
return out_string
def lowercase__ ( self , snake_case__ , snake_case__ = None ):
"""simple docstring"""
if not os.path.isdir(snake_case__ ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
lowerCAmelCase : Union[str, Any] = os.path.join(
snake_case__ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(snake_case__ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , snake_case__ )
elif not os.path.isfile(self.vocab_file ):
with open(snake_case__ , "wb" ) as fi:
lowerCAmelCase : Union[str, Any] = self.sp_model.serialized_model_proto()
fi.write(snake_case__ )
return (out_vocab_file,)
def lowercase__ ( self , snake_case__ , snake_case__ = None ):
"""simple docstring"""
if token_ids_a is None:
return token_ids_a + [self.sep_token_id]
lowerCAmelCase : List[str] = [self.sep_token_id]
return token_ids_a + sep + token_ids_a + sep
| 681 |
"""simple docstring"""
import os
import re
import shutil
import sys
import tempfile
import unittest
import black
lowerCAmelCase__ = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, '''utils'''))
import check_copies # noqa: E402
# This is the reference code that will be used in the tests.
# If DDPMSchedulerOutput is changed in scheduling_ddpm.py, this code needs to be manually updated.
lowerCAmelCase__ = ''' \"""
Output class for the scheduler\'s step function output.
Args:
prev_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):
Computed sample (x_{t-1}) of previous timestep. `prev_sample` should be used as next model input in the
denoising loop.
pred_original_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):
The predicted denoised sample (x_{0}) based on the model output from the current timestep.
`pred_original_sample` can be used to preview progress or for guidance.
\"""
prev_sample: torch.FloatTensor
pred_original_sample: Optional[torch.FloatTensor] = None
'''
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
"""simple docstring"""
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : int = tempfile.mkdtemp()
os.makedirs(os.path.join(self.diffusers_dir , "schedulers/" ) )
lowerCAmelCase : List[str] = self.diffusers_dir
shutil.copy(
os.path.join(snake_case__ , "src/diffusers/schedulers/scheduling_ddpm.py" ) , os.path.join(self.diffusers_dir , "schedulers/scheduling_ddpm.py" ) , )
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : Dict = "src/diffusers"
shutil.rmtree(self.diffusers_dir )
def lowercase__ ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__=None ):
"""simple docstring"""
lowerCAmelCase : Union[str, Any] = comment + f"""\nclass {class_name}(nn.Module):\n""" + class_code
if overwrite_result is not None:
lowerCAmelCase : str = comment + f"""\nclass {class_name}(nn.Module):\n""" + overwrite_result
lowerCAmelCase : int = black.Mode(target_versions={black.TargetVersion.PY37} , line_length=119 )
lowerCAmelCase : int = black.format_str(snake_case__ , mode=snake_case__ )
lowerCAmelCase : Dict = os.path.join(self.diffusers_dir , "new_code.py" )
with open(snake_case__ , "w" , newline="\n" ) as f:
f.write(snake_case__ )
if overwrite_result is None:
self.assertTrue(len(check_copies.is_copy_consistent(snake_case__ ) ) == 0 )
else:
check_copies.is_copy_consistent(f.name , overwrite=snake_case__ )
with open(snake_case__ , "r" ) as f:
self.assertTrue(f.read() , snake_case__ )
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : List[str] = check_copies.find_code_in_diffusers("schedulers.scheduling_ddpm.DDPMSchedulerOutput" )
self.assertEqual(snake_case__ , snake_case__ )
def lowercase__ ( self ):
"""simple docstring"""
self.check_copy_consistency(
"# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput" , "DDPMSchedulerOutput" , REFERENCE_CODE + "\n" , )
# With no empty line at the end
self.check_copy_consistency(
"# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput" , "DDPMSchedulerOutput" , snake_case__ , )
# Copy consistency with rename
self.check_copy_consistency(
"# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test" , "TestSchedulerOutput" , re.sub("DDPM" , "Test" , snake_case__ ) , )
# Copy consistency with a really long name
lowerCAmelCase : Union[str, Any] = "TestClassWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason"
self.check_copy_consistency(
f"""# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->{long_class_name}""" , f"""{long_class_name}SchedulerOutput""" , re.sub("Bert" , snake_case__ , snake_case__ ) , )
# Copy consistency with overwrite
self.check_copy_consistency(
"# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test" , "TestSchedulerOutput" , snake_case__ , overwrite_result=re.sub("DDPM" , "Test" , snake_case__ ) , )
| 681 | 1 |
"""simple docstring"""
import random
from .binary_exp_mod import bin_exp_mod
def a__ ( SCREAMING_SNAKE_CASE : Any , SCREAMING_SNAKE_CASE : int=1_0_0_0 ):
'''simple docstring'''
if n < 2:
return False
if n % 2 == 0:
return n == 2
# this means n is odd
lowerCAmelCase : int = n - 1
lowerCAmelCase : Optional[int] = 0
while d % 2 == 0:
d //= 2  # integer division keeps d an int for the modular exponentiation
exp += 1
# n - 1 = d * (2**exp)
lowerCAmelCase : Optional[Any] = 0
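# One Miller-Rabin round per iteration: a witness `a` passes if a^d == 1 (mod n)
# or a^(d * 2^r) == n - 1 (mod n) for some r < exp; any failing witness proves
# n composite.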
while count < prec:
lowerCAmelCase : List[str] = random.randint(2 , n - 1 )
lowerCAmelCase : Tuple = bin_exp_mod(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
if b != 1:
lowerCAmelCase : List[str] = True
for _ in range(SCREAMING_SNAKE_CASE ):
if b == n - 1:
lowerCAmelCase : List[str] = False
break
lowerCAmelCase : Optional[Any] = b * b
b %= n
if flag:
return False
count += 1
return True
if __name__ == "__main__":
lowerCAmelCase__ = abs(int(input('''Enter bound : ''').strip()))
print('''Here\'s the list of primes:''')
print(''', '''.join(str(i) for i in range(n + 1) if is_prime_big(i)))
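# Quick sanity checks (sketch only; `a__` is the mangled name of the primality
# test above and the optional second argument is the number of rounds):
#   a__(97)          # True with overwhelming probability
#   a__(100)         # False
#   a__(2**61 - 1)   # True: 2^61 - 1 is a Mersenne prime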
| 681 |
"""simple docstring"""
import re
from flax.core.frozen_dict import freeze
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.experimental import PartitionSpec as P
# Sentinels
lowerCAmelCase__ = object()
# For specifying empty leaf dict `{}`
lowerCAmelCase__ = object()
def a__ ( SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : Optional[Any] ):
'''simple docstring'''
lowerCAmelCase : Optional[int] = tuple((re.compile(x + "$" ) for x in qs) )
for i in range(len(SCREAMING_SNAKE_CASE ) - len(SCREAMING_SNAKE_CASE ) + 1 ):
lowerCAmelCase : int = [x.match(SCREAMING_SNAKE_CASE ) for x, y in zip(SCREAMING_SNAKE_CASE , ks[i:] )]
if matches and all(SCREAMING_SNAKE_CASE ):
return True
return False
def a__ ( SCREAMING_SNAKE_CASE : int ):
'''simple docstring'''
def replace(SCREAMING_SNAKE_CASE : Tuple , SCREAMING_SNAKE_CASE : Dict ):
for rule, replacement in rules:
if _match(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
return replacement
return val
return replace
def a__ ( ):
'''simple docstring'''
return [
# embeddings
(("transformer", "wpe", "embedding"), P("mp" , SCREAMING_SNAKE_CASE )),
(("transformer", "wte", "embedding"), P("mp" , SCREAMING_SNAKE_CASE )),
# attention
(("attention", "(q_proj|k_proj|v_proj)", "kernel"), P(SCREAMING_SNAKE_CASE , "mp" )),
(("attention", "out_proj", "kernel"), P("mp" , SCREAMING_SNAKE_CASE )),
(("attention", "out_proj", "bias"), None),
# mlp
(("mlp", "c_fc", "kernel"), P(SCREAMING_SNAKE_CASE , "mp" )),
(("mlp", "c_fc", "bias"), P("mp" )),
(("mlp", "c_proj", "kernel"), P("mp" , SCREAMING_SNAKE_CASE )),
(("mlp", "c_proj", "bias"), None),
# layer norms
((r"ln_\d+", "bias"), None),
((r"\d+", r"ln_\d+", "scale"), None),
(("ln_f", "bias"), None),
(("ln_f", "scale"), None),
]
def a__ ( SCREAMING_SNAKE_CASE : Union[str, Any] ):
'''simple docstring'''
lowerCAmelCase : Any = _get_partition_rules()
lowerCAmelCase : Tuple = _replacement_rules(SCREAMING_SNAKE_CASE )
lowerCAmelCase : List[str] = {k: _unmatched for k in flatten_dict(SCREAMING_SNAKE_CASE )}
lowerCAmelCase : List[Any] = {k: replace(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) for k, v in initd.items()}
assert _unmatched not in result.values(), "Incomplete partition spec."
return freeze(unflatten_dict(SCREAMING_SNAKE_CASE ) )
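# Usage sketch (assumption: `a__` above is `set_partitions` and `model` is a
# GPT-2-style flax model; the names below are illustrative only):
#   params = model.init(rng, dummy_input)["params"]
#   specs = a__(params)   # frozen pytree of PartitionSpec, one per parameter
# Every leaf whose flattened key matches a rule in _get_partition_rules()
# receives that rule's PartitionSpec; the assert guarantees full coverage.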
| 681 | 1 |
"""simple docstring"""
import argparse
import json
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
lowerCAmelCase__ = 16
lowerCAmelCase__ = 32
def a__ ( SCREAMING_SNAKE_CASE : Accelerator , SCREAMING_SNAKE_CASE : int = 1_6 , SCREAMING_SNAKE_CASE : str = "bert-base-cased" ):
'''simple docstring'''
lowerCAmelCase : int = AutoTokenizer.from_pretrained(SCREAMING_SNAKE_CASE )
lowerCAmelCase : List[str] = load_dataset("glue" , "mrpc" )
def tokenize_function(SCREAMING_SNAKE_CASE : Tuple ):
# max_length=None => use the model max length (it's actually the default)
lowerCAmelCase : str = tokenizer(examples["sentence1"] , examples["sentence2"] , truncation=SCREAMING_SNAKE_CASE , max_length=SCREAMING_SNAKE_CASE )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
lowerCAmelCase : Union[str, Any] = datasets.map(
SCREAMING_SNAKE_CASE , batched=SCREAMING_SNAKE_CASE , remove_columns=["idx", "sentence1", "sentence2"] , load_from_cache_file=SCREAMING_SNAKE_CASE )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
lowerCAmelCase : int = tokenized_datasets.rename_column("label" , "labels" )
def collate_fn(SCREAMING_SNAKE_CASE : Tuple ):
# On TPU it's best to pad everything to the same length or training will be very slow.
if accelerator.distributed_type == DistributedType.TPU:
return tokenizer.pad(SCREAMING_SNAKE_CASE , padding="max_length" , max_length=1_2_8 , return_tensors="pt" )
return tokenizer.pad(SCREAMING_SNAKE_CASE , padding="longest" , return_tensors="pt" )
# Instantiate dataloaders.
lowerCAmelCase : str = DataLoader(
tokenized_datasets["train"] , shuffle=SCREAMING_SNAKE_CASE , collate_fn=SCREAMING_SNAKE_CASE , batch_size=SCREAMING_SNAKE_CASE )
lowerCAmelCase : Tuple = DataLoader(
tokenized_datasets["validation"] , shuffle=SCREAMING_SNAKE_CASE , collate_fn=SCREAMING_SNAKE_CASE , batch_size=SCREAMING_SNAKE_CASE )
return train_dataloader, eval_dataloader
def a__ ( SCREAMING_SNAKE_CASE : Dict , SCREAMING_SNAKE_CASE : List[Any] , SCREAMING_SNAKE_CASE : Any , SCREAMING_SNAKE_CASE : List[Any] ):
'''simple docstring'''
model.eval()
lowerCAmelCase : Tuple = 0
for step, batch in enumerate(SCREAMING_SNAKE_CASE ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
lowerCAmelCase : Optional[int] = model(**SCREAMING_SNAKE_CASE )
lowerCAmelCase : List[str] = outputs.logits.argmax(dim=-1 )
# It is slightly faster to call this once, than multiple times
lowerCAmelCase , lowerCAmelCase : Union[str, Any] = accelerator.gather(
(predictions, batch["labels"]) ) # If we are in a multiprocess environment, the last batch has duplicates
if accelerator.use_distributed:
if step == len(SCREAMING_SNAKE_CASE ) - 1:
lowerCAmelCase : Optional[Any] = predictions[: len(eval_dataloader.dataset ) - samples_seen]
lowerCAmelCase : Dict = references[: len(eval_dataloader.dataset ) - samples_seen]
else:
samples_seen += references.shape[0]
metric.add_batch(
predictions=SCREAMING_SNAKE_CASE , references=SCREAMING_SNAKE_CASE , )
lowerCAmelCase : List[str] = metric.compute()
return eval_metric["accuracy"]
def a__ ( SCREAMING_SNAKE_CASE : List[Any] , SCREAMING_SNAKE_CASE : Tuple ):
'''simple docstring'''
lowerCAmelCase : Dict = Accelerator()
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
lowerCAmelCase : int = config["lr"]
lowerCAmelCase : Tuple = int(config["num_epochs"] )
lowerCAmelCase : List[str] = int(config["seed"] )
lowerCAmelCase : int = int(config["batch_size"] )
lowerCAmelCase : List[Any] = args.model_name_or_path
set_seed(SCREAMING_SNAKE_CASE )
lowerCAmelCase , lowerCAmelCase : Optional[Any] = get_dataloaders(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
lowerCAmelCase : Optional[int] = AutoModelForSequenceClassification.from_pretrained(SCREAMING_SNAKE_CASE , return_dict=SCREAMING_SNAKE_CASE )
# Instantiate optimizer
lowerCAmelCase : Optional[int] = (
AdamW
if accelerator.state.deepspeed_plugin is None
or "optimizer" not in accelerator.state.deepspeed_plugin.deepspeed_config
else DummyOptim
)
lowerCAmelCase : int = optimizer_cls(params=model.parameters() , lr=SCREAMING_SNAKE_CASE )
if accelerator.state.deepspeed_plugin is not None:
lowerCAmelCase : str = accelerator.state.deepspeed_plugin.deepspeed_config[
"gradient_accumulation_steps"
]
else:
lowerCAmelCase : str = 1
lowerCAmelCase : List[str] = (len(SCREAMING_SNAKE_CASE ) * num_epochs) // gradient_accumulation_steps
# Instantiate scheduler
if (
accelerator.state.deepspeed_plugin is None
or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
):
lowerCAmelCase : Optional[int] = get_linear_schedule_with_warmup(
optimizer=SCREAMING_SNAKE_CASE , num_warmup_steps=0 , num_training_steps=SCREAMING_SNAKE_CASE , )
else:
lowerCAmelCase : List[str] = DummyScheduler(SCREAMING_SNAKE_CASE , total_num_steps=SCREAMING_SNAKE_CASE , warmup_num_steps=0 )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase : Union[str, Any] = accelerator.prepare(
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
# We need to keep track of how many total steps we have iterated over
lowerCAmelCase : Union[str, Any] = 0
# We also need to keep track of the stating epoch so files are named properly
lowerCAmelCase : Optional[Any] = 0
lowerCAmelCase : Any = evaluate.load("glue" , "mrpc" )
lowerCAmelCase : Any = num_epochs
if args.partial_train_epoch is not None:
lowerCAmelCase : Optional[Any] = args.partial_train_epoch
if args.resume_from_checkpoint:
accelerator.load_state(args.resume_from_checkpoint )
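# Checkpoint folders are named `epoch_<n>`; recover <n> by reading the digits
# that follow the prefix.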
lowerCAmelCase : int = args.resume_from_checkpoint.split("epoch_" )[1]
lowerCAmelCase : Optional[Any] = ""
for char in epoch_string:
if char.isdigit():
state_epoch_num += char
else:
break
lowerCAmelCase : List[str] = int(SCREAMING_SNAKE_CASE ) + 1
lowerCAmelCase : str = evaluation_loop(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
accelerator.print("resumed checkpoint performance:" , SCREAMING_SNAKE_CASE )
accelerator.print("resumed checkpoint's scheduler's lr:" , lr_scheduler.get_lr()[0] )
accelerator.print("resumed optimizers's lr:" , optimizer.param_groups[0]["lr"] )
with open(os.path.join(args.output_dir , f"""state_{starting_epoch-1}.json""" ) , "r" ) as f:
lowerCAmelCase : str = json.load(SCREAMING_SNAKE_CASE )
assert resumed_state["accuracy"] == accuracy, "Accuracy mismatch, loading from checkpoint failed"
assert (
resumed_state["lr"] == lr_scheduler.get_lr()[0]
), "Scheduler learning rate mismatch, loading from checkpoint failed"
assert (
resumed_state["optimizer_lr"] == optimizer.param_groups[0]["lr"]
), "Optimizer learning rate mismatch, loading from checkpoint failed"
assert resumed_state["epoch"] == starting_epoch - 1, "Epoch mismatch, loading from checkpoint failed"
return
# Now we train the model
lowerCAmelCase : Optional[int] = {}
for epoch in range(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
model.train()
for step, batch in enumerate(SCREAMING_SNAKE_CASE ):
lowerCAmelCase : Union[str, Any] = model(**SCREAMING_SNAKE_CASE )
lowerCAmelCase : Union[str, Any] = outputs.loss
lowerCAmelCase : Optional[Any] = loss / gradient_accumulation_steps
accelerator.backward(SCREAMING_SNAKE_CASE )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
overall_step += 1
lowerCAmelCase : Optional[int] = f"""epoch_{epoch}"""
lowerCAmelCase : Optional[Any] = os.path.join(args.output_dir , SCREAMING_SNAKE_CASE )
accelerator.save_state(SCREAMING_SNAKE_CASE )
lowerCAmelCase : Optional[int] = evaluation_loop(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
lowerCAmelCase : Dict = accuracy
lowerCAmelCase : Tuple = lr_scheduler.get_lr()[0]
lowerCAmelCase : Tuple = optimizer.param_groups[0]["lr"]
lowerCAmelCase : List[Any] = epoch
lowerCAmelCase : Optional[int] = overall_step
accelerator.print(f"""epoch {epoch}:""" , SCREAMING_SNAKE_CASE )
accelerator.wait_for_everyone()
if accelerator.is_main_process:
with open(os.path.join(args.output_dir , f"""state_{epoch}.json""" ) , "w" ) as f:
json.dump(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
def a__ ( ):
'''simple docstring'''
lowerCAmelCase : List[str] = argparse.ArgumentParser(description="Simple example of training script tracking peak GPU memory usage." )
parser.add_argument(
"--model_name_or_path" , type=SCREAMING_SNAKE_CASE , default="bert-base-cased" , help="Path to pretrained model or model identifier from huggingface.co/models." , required=SCREAMING_SNAKE_CASE , )
parser.add_argument(
"--output_dir" , type=SCREAMING_SNAKE_CASE , default="." , help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory." , )
parser.add_argument(
"--resume_from_checkpoint" , type=SCREAMING_SNAKE_CASE , default=SCREAMING_SNAKE_CASE , help="If the training should continue from a checkpoint folder." , )
parser.add_argument(
"--partial_train_epoch" , type=SCREAMING_SNAKE_CASE , default=SCREAMING_SNAKE_CASE , help="If passed, the training will stop after this number of epochs." , )
parser.add_argument(
"--num_epochs" , type=SCREAMING_SNAKE_CASE , default=2 , help="Number of train epochs." , )
lowerCAmelCase : Tuple = parser.parse_args()
lowerCAmelCase : Union[str, Any] = {"lr": 2E-5, "num_epochs": args.num_epochs, "seed": 4_2, "batch_size": 1_6}
training_function(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
main()
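# Example invocation (sketch; the script name is illustrative, the flags are
# defined above):
#   accelerate launch checkpointing_test_script.py \
#       --model_name_or_path bert-base-cased --output_dir ./outputs --num_epochs 2
# Re-running with `--resume_from_checkpoint ./outputs/epoch_0` exercises the
# state-restore assertions above.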
| 681 |
"""simple docstring"""
def a__ ( SCREAMING_SNAKE_CASE : int = 1_0 , SCREAMING_SNAKE_CASE : int = 2_2 ):
'''simple docstring'''
lowerCAmelCase : Dict = range(1 , SCREAMING_SNAKE_CASE )
lowerCAmelCase : List[str] = range(1 , SCREAMING_SNAKE_CASE )
return sum(
1 for power in powers for base in bases if len(str(base**power ) ) == power )
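# e.g. 16807 = 7**5 has exactly 5 digits, so (base=7, power=5) contributes 1.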
if __name__ == "__main__":
print(F"{solution(10, 22) = }")
| 681 | 1 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_rembert import RemBertTokenizer
else:
lowerCAmelCase__ = None
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {'''vocab_file''': '''sentencepiece.model''', '''tokenizer_file''': '''tokenizer.json'''}
lowerCAmelCase__ = {
'''vocab_file''': {
'''google/rembert''': '''https://huggingface.co/google/rembert/resolve/main/sentencepiece.model''',
},
'''tokenizer_file''': {
'''google/rembert''': '''https://huggingface.co/google/rembert/resolve/main/tokenizer.json''',
},
}
lowerCAmelCase__ = {
'''google/rembert''': 256,
}
lowerCAmelCase__ = '''▁'''
class SCREAMING_SNAKE_CASE__ ( lowercase ):
"""simple docstring"""
a : str =VOCAB_FILES_NAMES
a : Optional[int] =PRETRAINED_VOCAB_FILES_MAP
a : Optional[int] =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a : Dict =RemBertTokenizer
def __init__( self , snake_case__=None , snake_case__=None , snake_case__=True , snake_case__=True , snake_case__=False , snake_case__="[CLS]" , snake_case__="[SEP]" , snake_case__="<unk>" , snake_case__="[SEP]" , snake_case__="<pad>" , snake_case__="[CLS]" , snake_case__="[MASK]" , **snake_case__ , ):
"""simple docstring"""
lowerCAmelCase : Optional[Any] = AddedToken(snake_case__ , lstrip=snake_case__ , rstrip=snake_case__ ) if isinstance(snake_case__ , snake_case__ ) else mask_token
super().__init__(
snake_case__ , tokenizer_file=snake_case__ , do_lower_case=snake_case__ , remove_space=snake_case__ , keep_accents=snake_case__ , bos_token=snake_case__ , eos_token=snake_case__ , unk_token=snake_case__ , sep_token=snake_case__ , pad_token=snake_case__ , cls_token=snake_case__ , mask_token=snake_case__ , **snake_case__ , )
lowerCAmelCase : Optional[int] = do_lower_case
lowerCAmelCase : str = remove_space
lowerCAmelCase : Any = keep_accents
lowerCAmelCase : Optional[Any] = vocab_file
lowerCAmelCase : Tuple = False if not self.vocab_file else True
def lowercase__ ( self , snake_case__ , snake_case__ = None ):
"""simple docstring"""
lowerCAmelCase : Optional[int] = [self.sep_token_id]
lowerCAmelCase : Tuple = [self.cls_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def lowercase__ ( self , snake_case__ , snake_case__ = None , snake_case__ = False ):
"""simple docstring"""
if already_has_special_tokens:
if token_ids_a is not None:
raise ValueError(
"You should not supply a second sequence if the provided sequence of "
"ids is already formatted with special tokens for the model." )
return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a]
if token_ids_a is not None:
return [1] + ([0] * len(snake_case__ )) + [1] + ([0] * len(snake_case__ )) + [1]
return [1] + ([0] * len(snake_case__ )) + [1]
def lowercase__ ( self , snake_case__ , snake_case__ = None ):
"""simple docstring"""
lowerCAmelCase : List[Any] = [self.sep_token_id]
lowerCAmelCase : Optional[Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
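# e.g. A = [5, 6] and B = [7] yield [CLS] A [SEP] B [SEP], i.e.
#   token_type_ids = [0, 0, 0, 0, 1, 1]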
def lowercase__ ( self , snake_case__ , snake_case__ = None ):
"""simple docstring"""
if not os.path.isdir(snake_case__ ):
logger.error("Vocabulary path ({}) should be a directory".format(snake_case__ ) )
return
lowerCAmelCase : Tuple = os.path.join(
snake_case__ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(snake_case__ ):
copyfile(self.vocab_file , snake_case__ )
return (out_vocab_file,)
| 681 |
"""simple docstring"""
def a__ ( SCREAMING_SNAKE_CASE : List[str] ):
'''simple docstring'''
lowerCAmelCase : Optional[int] = len(SCREAMING_SNAKE_CASE )
while cur > 1:
# Find the maximum number in arr
lowerCAmelCase : List[str] = arr.index(max(arr[0:cur] ) )
# Reverse from 0 to mi
lowerCAmelCase : str = arr[mi::-1] + arr[mi + 1 : len(SCREAMING_SNAKE_CASE )]
# Reverse whole list
lowerCAmelCase : str = arr[cur - 1 :: -1] + arr[cur : len(SCREAMING_SNAKE_CASE )]
cur -= 1
return arr
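# Worked example (sketch): pancake_sort([3, 1, 2])
#   cur=3: flip max (3) to the front -> [3, 1, 2], flip first 3 -> [2, 1, 3]
#   cur=2: flip max (2) to the front -> [2, 1, 3], flip first 2 -> [1, 2, 3]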
if __name__ == "__main__":
lowerCAmelCase__ = input('''Enter numbers separated by a comma:\n''').strip()
lowerCAmelCase__ = [int(item) for item in user_input.split(''',''')]
print(pancake_sort(unsorted))
| 681 | 1 |
"""simple docstring"""
from collections import OrderedDict
from typing import Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...feature_extraction_utils import FeatureExtractionMixin
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import TensorType, logging
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {
'''deepmind/language-perceiver''': '''https://huggingface.co/deepmind/language-perceiver/resolve/main/config.json''',
# See all Perceiver models at https://huggingface.co/models?filter=perceiver
}
class SCREAMING_SNAKE_CASE__ ( lowercase ):
"""simple docstring"""
a : Dict ="perceiver"
def __init__( self , snake_case__=256 , snake_case__=1_280 , snake_case__=768 , snake_case__=1 , snake_case__=26 , snake_case__=8 , snake_case__=8 , snake_case__=None , snake_case__=None , snake_case__="kv" , snake_case__=1 , snake_case__=1 , snake_case__="gelu" , snake_case__=0.1 , snake_case__=0.02 , snake_case__=1e-12 , snake_case__=True , snake_case__=262 , snake_case__=2_048 , snake_case__=56 , snake_case__=[368, 496] , snake_case__=16 , snake_case__=1_920 , snake_case__=16 , snake_case__=[1, 16, 224, 224] , **snake_case__ , ):
"""simple docstring"""
super().__init__(**snake_case__ )
lowerCAmelCase : Optional[int] = num_latents
lowerCAmelCase : str = d_latents
lowerCAmelCase : List[str] = d_model
lowerCAmelCase : Dict = num_blocks
lowerCAmelCase : Any = num_self_attends_per_block
lowerCAmelCase : Optional[Any] = num_self_attention_heads
lowerCAmelCase : Union[str, Any] = num_cross_attention_heads
lowerCAmelCase : Tuple = qk_channels
lowerCAmelCase : List[Any] = v_channels
lowerCAmelCase : Optional[int] = cross_attention_shape_for_attention
lowerCAmelCase : Optional[Any] = self_attention_widening_factor
lowerCAmelCase : str = cross_attention_widening_factor
lowerCAmelCase : Any = hidden_act
lowerCAmelCase : List[str] = attention_probs_dropout_prob
lowerCAmelCase : Tuple = initializer_range
lowerCAmelCase : Optional[Any] = layer_norm_eps
lowerCAmelCase : Optional[int] = use_query_residual
# masked language modeling attributes
lowerCAmelCase : int = vocab_size
lowerCAmelCase : Dict = max_position_embeddings
# image classification attributes
lowerCAmelCase : str = image_size
# flow attributes
lowerCAmelCase : Union[str, Any] = train_size
# multimodal autoencoding attributes
lowerCAmelCase : List[Any] = num_frames
lowerCAmelCase : Union[str, Any] = audio_samples_per_frame
lowerCAmelCase : Tuple = samples_per_patch
lowerCAmelCase : Dict = output_shape
class SCREAMING_SNAKE_CASE__ ( lowercase ):
"""simple docstring"""
@property
def lowercase__ ( self ):
"""simple docstring"""
if self.task == "multiple-choice":
lowerCAmelCase : int = {0: "batch", 1: "choice", 2: "sequence"}
else:
lowerCAmelCase : Optional[Any] = {0: "batch", 1: "sequence"}
return OrderedDict(
[
("inputs", dynamic_axis),
("attention_mask", dynamic_axis),
] )
@property
def lowercase__ ( self ):
"""simple docstring"""
return 1e-4
def lowercase__ ( self , snake_case__ , snake_case__ = -1 , snake_case__ = -1 , snake_case__ = -1 , snake_case__ = False , snake_case__ = None , snake_case__ = 3 , snake_case__ = 40 , snake_case__ = 40 , ):
"""simple docstring"""
if isinstance(snake_case__ , snake_case__ ):
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
lowerCAmelCase : Tuple = compute_effective_axis_dimension(
snake_case__ , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
lowerCAmelCase : Optional[Any] = preprocessor.num_special_tokens_to_add(snake_case__ )
lowerCAmelCase : Optional[Any] = compute_effective_axis_dimension(
snake_case__ , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=snake_case__ )
# Generate dummy inputs according to compute batch and sequence
lowerCAmelCase : Tuple = [" ".join(["a"] ) * seq_length] * batch_size
lowerCAmelCase : List[str] = dict(preprocessor(snake_case__ , return_tensors=snake_case__ ) )
lowerCAmelCase : Dict = inputs.pop("input_ids" )
return inputs
elif isinstance(snake_case__ , snake_case__ ) and preprocessor.model_input_names[0] == "pixel_values":
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
lowerCAmelCase : Union[str, Any] = compute_effective_axis_dimension(snake_case__ , fixed_dimension=OnnxConfig.default_fixed_batch )
lowerCAmelCase : Optional[Any] = self._generate_dummy_images(snake_case__ , snake_case__ , snake_case__ , snake_case__ )
lowerCAmelCase : str = dict(preprocessor(images=snake_case__ , return_tensors=snake_case__ ) )
lowerCAmelCase : str = inputs.pop("pixel_values" )
return inputs
else:
raise ValueError(
"Unable to generate dummy inputs for the model. Please provide a tokenizer or a preprocessor." )
| 681 |
"""simple docstring"""
from ..utils import is_flax_available, is_torch_available
if is_torch_available():
from .autoencoder_kl import AutoencoderKL
from .controlnet import ControlNetModel
from .dual_transformer_ad import DualTransformeraDModel
from .modeling_utils import ModelMixin
from .prior_transformer import PriorTransformer
from .ta_film_transformer import TaFilmDecoder
from .transformer_ad import TransformeraDModel
from .unet_1d import UNet1DModel
from .unet_2d import UNet2DModel
from .unet_2d_condition import UNet2DConditionModel
from .unet_3d_condition import UNet3DConditionModel
from .vq_model import VQModel
if is_flax_available():
from .controlnet_flax import FlaxControlNetModel
from .unet_ad_condition_flax import FlaxUNetaDConditionModel
from .vae_flax import FlaxAutoencoderKL
| 681 | 1 |
"""simple docstring"""
import torch
from transformers import PreTrainedModel, XLMRobertaConfig, XLMRobertaModel
class SCREAMING_SNAKE_CASE__ ( lowercase ):
"""simple docstring"""
a : Optional[int] ="M-CLIP"
def __init__( self , snake_case__=1_024 , snake_case__=768 , **snake_case__ ):
"""simple docstring"""
lowerCAmelCase : Union[str, Any] = transformerDimSize
lowerCAmelCase : Optional[int] = imageDimSize
super().__init__(**snake_case__ )
class SCREAMING_SNAKE_CASE__ ( lowercase ):
"""simple docstring"""
a : Tuple =MCLIPConfig
def __init__( self , snake_case__ , *snake_case__ , **snake_case__ ):
"""simple docstring"""
super().__init__(snake_case__ , *snake_case__ , **snake_case__ )
lowerCAmelCase : Dict = XLMRobertaModel(snake_case__ )
lowerCAmelCase : Tuple = torch.nn.Linear(
in_features=config.transformerDimensions , out_features=config.numDims )
def lowercase__ ( self , snake_case__ , snake_case__ ):
"""simple docstring"""
lowerCAmelCase : Dict = self.transformer(input_ids=snake_case__ , attention_mask=snake_case__ )[0]
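# Masked mean pooling: zero out padded positions, then divide by the number of
# real tokens in each sequence.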
lowerCAmelCase : str = (embs * attention_mask.unsqueeze(2 )).sum(dim=1 ) / attention_mask.sum(dim=1 )[:, None]
return self.LinearTransformation(snake_case__ ), embs
| 681 |
"""simple docstring"""
import math
def a__ ( SCREAMING_SNAKE_CASE : int ):
'''simple docstring'''
return math.sqrt(SCREAMING_SNAKE_CASE ) * math.sqrt(SCREAMING_SNAKE_CASE ) == SCREAMING_SNAKE_CASE
def a__ ( SCREAMING_SNAKE_CASE : int ):
'''simple docstring'''
lowerCAmelCase : Dict = 0
lowerCAmelCase : List[str] = n
while left <= right:
lowerCAmelCase : str = (left + right) // 2
if mid**2 == n:
return True
elif mid**2 > n:
lowerCAmelCase : int = mid - 1
else:
lowerCAmelCase : int = mid + 1
return False
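# Note (assumption): the float-based check above can misreport for very large
# inputs because math.sqrt rounds; this binary search is exact.
#   e.g. both versions return True for 16 and False for 26.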
if __name__ == "__main__":
import doctest
doctest.testmod()
| 681 | 1 |
"""simple docstring"""
import os
def a__ ( SCREAMING_SNAKE_CASE : int ):
'''simple docstring'''
lowerCAmelCase : Union[str, Any] = len(grid[0] )
lowerCAmelCase : Optional[Any] = len(SCREAMING_SNAKE_CASE )
lowerCAmelCase : Union[str, Any] = 0
lowerCAmelCase : List[str] = 0
lowerCAmelCase : List[str] = 0
# Check vertically, horizontally, diagonally at the same time (only works
# for nxn grid)
for i in range(SCREAMING_SNAKE_CASE ):
for j in range(n_rows - 3 ):
lowerCAmelCase : Optional[int] = grid[j][i] * grid[j + 1][i] * grid[j + 2][i] * grid[j + 3][i]
lowerCAmelCase : Union[str, Any] = grid[i][j] * grid[i][j + 1] * grid[i][j + 2] * grid[i][j + 3]
# Left-to-right diagonal (\) product
if i < n_columns - 3:
lowerCAmelCase : Optional[int] = (
grid[i][j]
* grid[i + 1][j + 1]
* grid[i + 2][j + 2]
* grid[i + 3][j + 3]
)
# Right-to-left diagonal(/) product
if i > 2:
lowerCAmelCase : Optional[Any] = (
grid[i][j]
* grid[i - 1][j + 1]
* grid[i - 2][j + 2]
* grid[i - 3][j + 3]
)
lowerCAmelCase : int = max(
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
if max_product > largest:
lowerCAmelCase : List[Any] = max_product
return largest
def a__ ( ):
'''simple docstring'''
lowerCAmelCase : Union[str, Any] = []
with open(os.path.dirname(SCREAMING_SNAKE_CASE ) + "/grid.txt" ) as file:
for line in file:
grid.append(line.strip("\n" ).split(" " ) )
lowerCAmelCase : Dict = [[int(SCREAMING_SNAKE_CASE ) for i in grid[j]] for j in range(len(SCREAMING_SNAKE_CASE ) )]
return largest_product(SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
print(solution())
| 681 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {
'''google/vit-base-patch16-224''': '''https://huggingface.co/vit-base-patch16-224/resolve/main/config.json''',
# See all ViT models at https://huggingface.co/models?filter=vit
}
class SCREAMING_SNAKE_CASE__ ( lowercase ):
"""simple docstring"""
a : Union[str, Any] ="vit"
def __init__( self , snake_case__=768 , snake_case__=12 , snake_case__=12 , snake_case__=3_072 , snake_case__="gelu" , snake_case__=0.0 , snake_case__=0.0 , snake_case__=0.02 , snake_case__=1e-12 , snake_case__=224 , snake_case__=16 , snake_case__=3 , snake_case__=True , snake_case__=16 , **snake_case__ , ):
"""simple docstring"""
super().__init__(**snake_case__ )
lowerCAmelCase : Optional[Any] = hidden_size
lowerCAmelCase : List[Any] = num_hidden_layers
lowerCAmelCase : List[Any] = num_attention_heads
lowerCAmelCase : Union[str, Any] = intermediate_size
lowerCAmelCase : Any = hidden_act
lowerCAmelCase : Optional[Any] = hidden_dropout_prob
lowerCAmelCase : Optional[Any] = attention_probs_dropout_prob
lowerCAmelCase : List[str] = initializer_range
lowerCAmelCase : Optional[Any] = layer_norm_eps
lowerCAmelCase : Optional[int] = image_size
lowerCAmelCase : Optional[Any] = patch_size
lowerCAmelCase : Tuple = num_channels
lowerCAmelCase : Optional[int] = qkv_bias
lowerCAmelCase : str = encoder_stride
class SCREAMING_SNAKE_CASE__ ( lowercase ):
"""simple docstring"""
a : List[Any] =version.parse("1.11" )
@property
def lowercase__ ( self ):
"""simple docstring"""
return OrderedDict(
[
("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
] )
@property
def lowercase__ ( self ):
"""simple docstring"""
return 1e-4
| 681 | 1 |
"""simple docstring"""
import argparse
import json
import os
import numpy as np
import PIL
import requests
import tensorflow.keras.applications.efficientnet as efficientnet
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from tensorflow.keras.preprocessing import image
from transformers import (
EfficientNetConfig,
EfficientNetForImageClassification,
EfficientNetImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {
'''b0''': efficientnet.EfficientNetBa,
'''b1''': efficientnet.EfficientNetBa,
'''b2''': efficientnet.EfficientNetBa,
'''b3''': efficientnet.EfficientNetBa,
'''b4''': efficientnet.EfficientNetBa,
'''b5''': efficientnet.EfficientNetBa,
'''b6''': efficientnet.EfficientNetBa,
'''b7''': efficientnet.EfficientNetBa,
}
lowerCAmelCase__ = {
'''b0''': {
'''hidden_dim''': 1_280,
'''width_coef''': 1.0,
'''depth_coef''': 1.0,
'''image_size''': 224,
'''dropout_rate''': 0.2,
'''dw_padding''': [],
},
'''b1''': {
'''hidden_dim''': 1_280,
'''width_coef''': 1.0,
'''depth_coef''': 1.1,
'''image_size''': 240,
'''dropout_rate''': 0.2,
'''dw_padding''': [16],
},
'''b2''': {
'''hidden_dim''': 1_408,
'''width_coef''': 1.1,
'''depth_coef''': 1.2,
'''image_size''': 260,
'''dropout_rate''': 0.3,
'''dw_padding''': [5, 8, 16],
},
'''b3''': {
'''hidden_dim''': 1_536,
'''width_coef''': 1.2,
'''depth_coef''': 1.4,
'''image_size''': 300,
'''dropout_rate''': 0.3,
'''dw_padding''': [5, 18],
},
'''b4''': {
'''hidden_dim''': 1_792,
'''width_coef''': 1.4,
'''depth_coef''': 1.8,
'''image_size''': 380,
'''dropout_rate''': 0.4,
'''dw_padding''': [6],
},
'''b5''': {
'''hidden_dim''': 2_048,
'''width_coef''': 1.6,
'''depth_coef''': 2.2,
'''image_size''': 456,
'''dropout_rate''': 0.4,
'''dw_padding''': [13, 27],
},
'''b6''': {
'''hidden_dim''': 2_304,
'''width_coef''': 1.8,
'''depth_coef''': 2.6,
'''image_size''': 528,
'''dropout_rate''': 0.5,
'''dw_padding''': [31],
},
'''b7''': {
'''hidden_dim''': 2_560,
'''width_coef''': 2.0,
'''depth_coef''': 3.1,
'''image_size''': 600,
'''dropout_rate''': 0.5,
'''dw_padding''': [18],
},
}
def a__ ( SCREAMING_SNAKE_CASE : Dict ):
'''simple docstring'''
lowerCAmelCase : int = EfficientNetConfig()
lowerCAmelCase : Optional[int] = CONFIG_MAP[model_name]["hidden_dim"]
lowerCAmelCase : List[str] = CONFIG_MAP[model_name]["width_coef"]
lowerCAmelCase : int = CONFIG_MAP[model_name]["depth_coef"]
lowerCAmelCase : Union[str, Any] = CONFIG_MAP[model_name]["image_size"]
lowerCAmelCase : List[Any] = CONFIG_MAP[model_name]["dropout_rate"]
lowerCAmelCase : Dict = CONFIG_MAP[model_name]["dw_padding"]
lowerCAmelCase : str = "huggingface/label-files"
lowerCAmelCase : Optional[int] = "imagenet-1k-id2label.json"
lowerCAmelCase : Dict = 1_0_0_0
lowerCAmelCase : Tuple = json.load(open(hf_hub_download(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , repo_type="dataset" ) , "r" ) )
lowerCAmelCase : List[str] = {int(SCREAMING_SNAKE_CASE ): v for k, v in idalabel.items()}
lowerCAmelCase : str = idalabel
lowerCAmelCase : str = {v: k for k, v in idalabel.items()}
return config
def a__ ( ):
'''simple docstring'''
lowerCAmelCase : List[Any] = "http://images.cocodataset.org/val2017/000000039769.jpg"
lowerCAmelCase : Any = Image.open(requests.get(SCREAMING_SNAKE_CASE , stream=SCREAMING_SNAKE_CASE ).raw )
return im
def a__ ( SCREAMING_SNAKE_CASE : int ):
'''simple docstring'''
lowerCAmelCase : List[str] = CONFIG_MAP[model_name]["image_size"]
lowerCAmelCase : Optional[int] = EfficientNetImageProcessor(
size={"height": size, "width": size} , image_mean=[0.485, 0.456, 0.406] , image_std=[0.47_853_944, 0.4_732_864, 0.47_434_163] , do_center_crop=SCREAMING_SNAKE_CASE , )
return preprocessor
def a__ ( SCREAMING_SNAKE_CASE : Dict ):
'''simple docstring'''
lowerCAmelCase : Tuple = [v.split("_" )[0].split("block" )[1] for v in original_param_names if v.startswith("block" )]
lowerCAmelCase : Optional[int] = sorted(set(SCREAMING_SNAKE_CASE ) )
lowerCAmelCase : int = len(SCREAMING_SNAKE_CASE )
lowerCAmelCase : Optional[Any] = {b: str(SCREAMING_SNAKE_CASE ) for b, i in zip(SCREAMING_SNAKE_CASE , range(SCREAMING_SNAKE_CASE ) )}
lowerCAmelCase : Dict = []
rename_keys.append(("stem_conv/kernel:0", "embeddings.convolution.weight") )
rename_keys.append(("stem_bn/gamma:0", "embeddings.batchnorm.weight") )
rename_keys.append(("stem_bn/beta:0", "embeddings.batchnorm.bias") )
rename_keys.append(("stem_bn/moving_mean:0", "embeddings.batchnorm.running_mean") )
rename_keys.append(("stem_bn/moving_variance:0", "embeddings.batchnorm.running_var") )
for b in block_names:
lowerCAmelCase : List[Any] = block_name_mapping[b]
rename_keys.append((f"""block{b}_expand_conv/kernel:0""", f"""encoder.blocks.{hf_b}.expansion.expand_conv.weight""") )
rename_keys.append((f"""block{b}_expand_bn/gamma:0""", f"""encoder.blocks.{hf_b}.expansion.expand_bn.weight""") )
rename_keys.append((f"""block{b}_expand_bn/beta:0""", f"""encoder.blocks.{hf_b}.expansion.expand_bn.bias""") )
rename_keys.append(
(f"""block{b}_expand_bn/moving_mean:0""", f"""encoder.blocks.{hf_b}.expansion.expand_bn.running_mean""") )
rename_keys.append(
(f"""block{b}_expand_bn/moving_variance:0""", f"""encoder.blocks.{hf_b}.expansion.expand_bn.running_var""") )
rename_keys.append(
(f"""block{b}_dwconv/depthwise_kernel:0""", f"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_conv.weight""") )
rename_keys.append((f"""block{b}_bn/gamma:0""", f"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.weight""") )
rename_keys.append((f"""block{b}_bn/beta:0""", f"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.bias""") )
rename_keys.append(
(f"""block{b}_bn/moving_mean:0""", f"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_mean""") )
rename_keys.append(
(f"""block{b}_bn/moving_variance:0""", f"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_var""") )
rename_keys.append((f"""block{b}_se_reduce/kernel:0""", f"""encoder.blocks.{hf_b}.squeeze_excite.reduce.weight""") )
rename_keys.append((f"""block{b}_se_reduce/bias:0""", f"""encoder.blocks.{hf_b}.squeeze_excite.reduce.bias""") )
rename_keys.append((f"""block{b}_se_expand/kernel:0""", f"""encoder.blocks.{hf_b}.squeeze_excite.expand.weight""") )
rename_keys.append((f"""block{b}_se_expand/bias:0""", f"""encoder.blocks.{hf_b}.squeeze_excite.expand.bias""") )
rename_keys.append(
(f"""block{b}_project_conv/kernel:0""", f"""encoder.blocks.{hf_b}.projection.project_conv.weight""") )
rename_keys.append((f"""block{b}_project_bn/gamma:0""", f"""encoder.blocks.{hf_b}.projection.project_bn.weight""") )
rename_keys.append((f"""block{b}_project_bn/beta:0""", f"""encoder.blocks.{hf_b}.projection.project_bn.bias""") )
rename_keys.append(
(f"""block{b}_project_bn/moving_mean:0""", f"""encoder.blocks.{hf_b}.projection.project_bn.running_mean""") )
rename_keys.append(
(f"""block{b}_project_bn/moving_variance:0""", f"""encoder.blocks.{hf_b}.projection.project_bn.running_var""") )
rename_keys.append(("top_conv/kernel:0", "encoder.top_conv.weight") )
rename_keys.append(("top_bn/gamma:0", "encoder.top_bn.weight") )
rename_keys.append(("top_bn/beta:0", "encoder.top_bn.bias") )
rename_keys.append(("top_bn/moving_mean:0", "encoder.top_bn.running_mean") )
rename_keys.append(("top_bn/moving_variance:0", "encoder.top_bn.running_var") )
lowerCAmelCase : str = {}
for item in rename_keys:
if item[0] in original_param_names:
lowerCAmelCase : Optional[int] = "efficientnet." + item[1]
lowerCAmelCase : int = "classifier.weight"
lowerCAmelCase : Union[str, Any] = "classifier.bias"
return key_mapping
def a__ ( SCREAMING_SNAKE_CASE : List[Any] , SCREAMING_SNAKE_CASE : List[Any] , SCREAMING_SNAKE_CASE : Optional[int] ):
'''simple docstring'''
for key, value in tf_params.items():
if "normalization" in key:
continue
lowerCAmelCase : str = key_mapping[key]
if "_conv" in key and "kernel" in key:
lowerCAmelCase : Union[str, Any] = torch.from_numpy(SCREAMING_SNAKE_CASE ).permute(3 , 2 , 0 , 1 )
elif "depthwise_kernel" in key:
lowerCAmelCase : List[Any] = torch.from_numpy(SCREAMING_SNAKE_CASE ).permute(2 , 3 , 0 , 1 )
elif "kernel" in key:
lowerCAmelCase : Any = torch.from_numpy(np.transpose(SCREAMING_SNAKE_CASE ) )
else:
lowerCAmelCase : Any = torch.from_numpy(SCREAMING_SNAKE_CASE )
# Replace HF parameters with original TF model parameters
assert hf_params[hf_key].shape == new_hf_value.shape
hf_params[hf_key].copy_(SCREAMING_SNAKE_CASE )
@torch.no_grad()
def a__ ( SCREAMING_SNAKE_CASE : Optional[Any] , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : Any ):
'''simple docstring'''
lowerCAmelCase : Optional[int] = model_classes[model_name](
include_top=SCREAMING_SNAKE_CASE , weights="imagenet" , input_tensor=SCREAMING_SNAKE_CASE , input_shape=SCREAMING_SNAKE_CASE , pooling=SCREAMING_SNAKE_CASE , classes=1_0_0_0 , classifier_activation="softmax" , )
lowerCAmelCase : str = original_model.trainable_variables
lowerCAmelCase : Union[str, Any] = original_model.non_trainable_variables
lowerCAmelCase : int = {param.name: param.numpy() for param in tf_params}
for param in tf_non_train_params:
lowerCAmelCase : int = param.numpy()
lowerCAmelCase : Optional[int] = list(tf_params.keys() )
# Load HuggingFace model
lowerCAmelCase : Tuple = get_efficientnet_config(SCREAMING_SNAKE_CASE )
lowerCAmelCase : Optional[Any] = EfficientNetForImageClassification(SCREAMING_SNAKE_CASE ).eval()
lowerCAmelCase : List[Any] = hf_model.state_dict()
# Create src-to-dst parameter name mapping dictionary
print("Converting parameters..." )
lowerCAmelCase : Optional[int] = rename_keys(SCREAMING_SNAKE_CASE )
replace_params(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
# Initialize preprocessor and preprocess input image
lowerCAmelCase : int = convert_image_processor(SCREAMING_SNAKE_CASE )
lowerCAmelCase : Optional[int] = preprocessor(images=prepare_img() , return_tensors="pt" )
# HF model inference
hf_model.eval()
with torch.no_grad():
lowerCAmelCase : Union[str, Any] = hf_model(**SCREAMING_SNAKE_CASE )
lowerCAmelCase : Union[str, Any] = outputs.logits.detach().numpy()
# Original model inference
lowerCAmelCase : int = False
lowerCAmelCase : Any = CONFIG_MAP[model_name]["image_size"]
lowerCAmelCase : Any = prepare_img().resize((image_size, image_size) , resample=PIL.Image.NEAREST )
lowerCAmelCase : Dict = image.img_to_array(SCREAMING_SNAKE_CASE )
lowerCAmelCase : str = np.expand_dims(SCREAMING_SNAKE_CASE , axis=0 )
lowerCAmelCase : Any = original_model.predict(SCREAMING_SNAKE_CASE )
# Check whether original and HF model outputs match -> np.allclose
assert np.allclose(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , atol=1E-3 ), "The predicted logits are not the same."
print("Model outputs match!" )
if save_model:
# Create folder to save model
if not os.path.isdir(SCREAMING_SNAKE_CASE ):
os.mkdir(SCREAMING_SNAKE_CASE )
# Save converted model and image processor
hf_model.save_pretrained(SCREAMING_SNAKE_CASE )
preprocessor.save_pretrained(SCREAMING_SNAKE_CASE )
if push_to_hub:
# Push model and image processor to hub
print(f"""Pushing converted {model_name} to the hub...""" )
lowerCAmelCase : Union[str, Any] = f"""efficientnet-{model_name}"""
preprocessor.push_to_hub(SCREAMING_SNAKE_CASE )
hf_model.push_to_hub(SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
lowerCAmelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''b0''',
type=str,
help='''Version name of the EfficientNet model you want to convert, select from [b0, b1, b2, b3, b4, b5, b6, b7].''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default='''hf_model''',
type=str,
help='''Path to the output PyTorch model directory.''',
)
parser.add_argument('''--save_model''', action='''store_true''', help='''Save model to local''')
parser.add_argument('''--push_to_hub''', action='''store_true''', help='''Push model and image processor to the hub''')
lowerCAmelCase__ = parser.parse_args()
convert_efficientnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.save_model, args.push_to_hub)
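# Example run (sketch; the script name is illustrative, the flags are defined
# above):
#   python convert_efficientnet_to_pytorch.py --model_name b0 \
#       --pytorch_dump_folder_path hf_model --save_model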
| 681 |
"""simple docstring"""
from .glue import glue_convert_examples_to_features, glue_output_modes, glue_processors, glue_tasks_num_labels
from .squad import SquadExample, SquadFeatures, SquadV1Processor, SquadV2Processor, squad_convert_examples_to_features
from .utils import DataProcessor, InputExample, InputFeatures, SingleSentenceClassificationProcessor
from .xnli import xnli_output_modes, xnli_processors, xnli_tasks_num_labels
| 681 | 1 |
"""simple docstring"""
import PIL.Image
import PIL.ImageOps
from packaging import version
from PIL import Image
if version.parse(version.parse(PIL.__version__).base_version) >= version.parse('''9.1.0'''):
lowerCAmelCase__ = {
'''linear''': PIL.Image.Resampling.BILINEAR,
'''bilinear''': PIL.Image.Resampling.BILINEAR,
'''bicubic''': PIL.Image.Resampling.BICUBIC,
'''lanczos''': PIL.Image.Resampling.LANCZOS,
'''nearest''': PIL.Image.Resampling.NEAREST,
}
else:
lowerCAmelCase__ = {
'''linear''': PIL.Image.LINEAR,
'''bilinear''': PIL.Image.BILINEAR,
'''bicubic''': PIL.Image.BICUBIC,
'''lanczos''': PIL.Image.LANCZOS,
'''nearest''': PIL.Image.NEAREST,
}
def a__ ( SCREAMING_SNAKE_CASE : List[str] ):
'''simple docstring'''
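# Map model outputs from [-1, 1] to [0, 1] before converting to uint8 PIL images.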
lowerCAmelCase : str = (images / 2 + 0.5).clamp(0 , 1 )
lowerCAmelCase : Optional[int] = images.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
lowerCAmelCase : Union[str, Any] = numpy_to_pil(SCREAMING_SNAKE_CASE )
return images
def a__ ( SCREAMING_SNAKE_CASE : List[str] ):
'''simple docstring'''
if images.ndim == 3:
lowerCAmelCase : List[Any] = images[None, ...]
lowerCAmelCase : Any = (images * 2_5_5).round().astype("uint8" )
if images.shape[-1] == 1:
# special case for grayscale (single channel) images
lowerCAmelCase : Tuple = [Image.fromarray(image.squeeze() , mode="L" ) for image in images]
else:
lowerCAmelCase : Any = [Image.fromarray(SCREAMING_SNAKE_CASE ) for image in images]
return pil_images
| 681 |
"""simple docstring"""
import copy
import inspect
import unittest
from transformers import AutoBackbone
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import require_timm, require_torch, torch_device
from transformers.utils.import_utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
if is_torch_available():
import torch
from transformers import TimmBackbone, TimmBackboneConfig
from ...test_pipeline_mixin import PipelineTesterMixin
class SCREAMING_SNAKE_CASE__ :
"""simple docstring"""
def __init__( self , snake_case__ , snake_case__=None , snake_case__=None , snake_case__=None , snake_case__="resnet50" , snake_case__=3 , snake_case__=32 , snake_case__=3 , snake_case__=True , snake_case__=True , ):
"""simple docstring"""
lowerCAmelCase : List[str] = parent
lowerCAmelCase : Union[str, Any] = out_indices if out_indices is not None else [4]
lowerCAmelCase : Tuple = stage_names
lowerCAmelCase : Any = out_features
lowerCAmelCase : Any = backbone
lowerCAmelCase : Union[str, Any] = batch_size
lowerCAmelCase : Optional[int] = image_size
lowerCAmelCase : List[str] = num_channels
lowerCAmelCase : int = use_pretrained_backbone
lowerCAmelCase : Tuple = is_training
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : Dict = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowerCAmelCase : Optional[int] = self.get_config()
return config, pixel_values
def lowercase__ ( self ):
"""simple docstring"""
return TimmBackboneConfig(
image_size=self.image_size , num_channels=self.num_channels , out_features=self.out_features , out_indices=self.out_indices , stage_names=self.stage_names , use_pretrained_backbone=self.use_pretrained_backbone , backbone=self.backbone , )
def lowercase__ ( self , snake_case__ , snake_case__ ):
"""simple docstring"""
lowerCAmelCase : List[Any] = TimmBackbone(config=snake_case__ )
model.to(snake_case__ )
model.eval()
with torch.no_grad():
lowerCAmelCase : Union[str, Any] = model(snake_case__ )
self.parent.assertEqual(
result.feature_maps[-1].shape , (self.batch_size, model.channels[-1], 14, 14) , )
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : Union[str, Any] = self.prepare_config_and_inputs()
lowerCAmelCase , lowerCAmelCase : Tuple = config_and_inputs
lowerCAmelCase : str = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
@require_timm
class SCREAMING_SNAKE_CASE__ ( lowercase , lowercase , lowercase , unittest.TestCase ):
"""simple docstring"""
a : Optional[int] =(TimmBackbone,) if is_torch_available() else ()
a : Union[str, Any] ={"feature-extraction": TimmBackbone} if is_torch_available() else {}
a : Tuple =False
a : List[Any] =False
a : Optional[Any] =False
a : Dict =False
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : Optional[Any] = TimmBackboneModelTester(self )
lowerCAmelCase : List[str] = ConfigTester(self , config_class=snake_case__ , has_text_modality=snake_case__ )
def lowercase__ ( self ):
"""simple docstring"""
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : str = "resnet18"
lowerCAmelCase : str = "microsoft/resnet-18"
lowerCAmelCase : List[Any] = AutoBackbone.from_pretrained(snake_case__ , use_timm_backbone=snake_case__ )
lowerCAmelCase : List[str] = AutoBackbone.from_pretrained(snake_case__ )
self.assertEqual(len(timm_model.out_features ) , len(transformers_model.out_features ) )
self.assertEqual(len(timm_model.stage_names ) , len(transformers_model.stage_names ) )
self.assertEqual(timm_model.channels , transformers_model.channels )
# Out indices are set to the last layer by default. For timm models, we don't know
# the number of layers in advance, so we set it to (-1,), whereas for transformers
# models, we set it to [len(stage_names) - 1] (kept for backward compatibility).
self.assertEqual(timm_model.out_indices , (-1,) )
self.assertEqual(transformers_model.out_indices , [len(timm_model.stage_names ) - 1] )
lowerCAmelCase : Union[str, Any] = AutoBackbone.from_pretrained(snake_case__ , use_timm_backbone=snake_case__ , out_indices=[1, 2, 3] )
lowerCAmelCase : List[Any] = AutoBackbone.from_pretrained(snake_case__ , out_indices=[1, 2, 3] )
self.assertEqual(timm_model.out_indices , transformers_model.out_indices )
self.assertEqual(len(timm_model.out_features ) , len(transformers_model.out_features ) )
self.assertEqual(timm_model.channels , transformers_model.channels )
@unittest.skip("TimmBackbone doesn't support feed forward chunking" )
def lowercase__ ( self ):
"""simple docstring"""
pass
@unittest.skip("TimmBackbone doesn't have num_hidden_layers attribute" )
def lowercase__ ( self ):
"""simple docstring"""
pass
@unittest.skip("TimmBackbone initialization is managed on the timm side" )
def lowercase__ ( self ):
"""simple docstring"""
pass
@unittest.skip("TimmBackbone models doesn't have inputs_embeds" )
def lowercase__ ( self ):
"""simple docstring"""
pass
@unittest.skip("TimmBackbone models doesn't have inputs_embeds" )
def lowercase__ ( self ):
"""simple docstring"""
pass
@unittest.skip("TimmBackbone model cannot be created without specifying a backbone checkpoint" )
def lowercase__ ( self ):
"""simple docstring"""
pass
@unittest.skip("Only checkpoints on timm can be loaded into TimmBackbone" )
def lowercase__ ( self ):
"""simple docstring"""
pass
@unittest.skip("model weights aren't tied in TimmBackbone." )
def lowercase__ ( self ):
"""simple docstring"""
pass
@unittest.skip("model weights aren't tied in TimmBackbone." )
def lowercase__ ( self ):
"""simple docstring"""
pass
@unittest.skip("Only checkpoints on timm can be loaded into TimmBackbone" )
def lowercase__ ( self ):
"""simple docstring"""
pass
@unittest.skip("Only checkpoints on timm can be loaded into TimmBackbone" )
def lowercase__ ( self ):
"""simple docstring"""
pass
@unittest.skip("TimmBackbone doesn't have hidden size info in its configuration." )
def lowercase__ ( self ):
"""simple docstring"""
pass
@unittest.skip("TimmBackbone doesn't support output_attentions." )
def lowercase__ ( self ):
"""simple docstring"""
pass
@unittest.skip("Safetensors is not supported by timm." )
def lowercase__ ( self ):
"""simple docstring"""
pass
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
def lowercase__ ( self ):
"""simple docstring"""
pass
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase , lowerCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCAmelCase : Dict = model_class(snake_case__ )
lowerCAmelCase : Any = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCAmelCase : Optional[int] = [*signature.parameters.keys()]
lowerCAmelCase : Any = ["pixel_values"]
self.assertListEqual(arg_names[:1] , snake_case__ )
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase , lowerCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
lowerCAmelCase : int = True
lowerCAmelCase : str = self.has_attentions
# no need to test all models as different heads yield the same functionality
lowerCAmelCase : Optional[int] = self.all_model_classes[0]
lowerCAmelCase : Union[str, Any] = model_class(snake_case__ )
model.to(snake_case__ )
lowerCAmelCase : Optional[int] = self._prepare_for_class(snake_case__ , snake_case__ )
lowerCAmelCase : Dict = model(**snake_case__ )
lowerCAmelCase : Tuple = outputs[0][-1]
# Encoder-/Decoder-only models
lowerCAmelCase : Optional[int] = outputs.hidden_states[0]
hidden_states.retain_grad()
if self.has_attentions:
lowerCAmelCase : Union[str, Any] = outputs.attentions[0]
attentions.retain_grad()
output.flatten()[0].backward(retain_graph=snake_case__ )
self.assertIsNotNone(hidden_states.grad )
if self.has_attentions:
self.assertIsNotNone(attentions.grad )
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase , lowerCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCAmelCase : Dict = model_class(snake_case__ )
model.to(snake_case__ )
model.eval()
lowerCAmelCase : List[str] = model(**snake_case__ )
self.assertEqual(len(result.feature_maps ) , len(config.out_indices ) )
self.assertEqual(len(model.channels ) , len(config.out_indices ) )
# Check output of last stage is taken if out_features=None, out_indices=None
lowerCAmelCase : Dict = copy.deepcopy(snake_case__ )
lowerCAmelCase : Optional[int] = None
lowerCAmelCase : Dict = model_class(snake_case__ )
model.to(snake_case__ )
model.eval()
lowerCAmelCase : Optional[int] = model(**snake_case__ )
self.assertEqual(len(result.feature_maps ) , 1 )
self.assertEqual(len(model.channels ) , 1 )
# Check backbone can be initialized with fresh weights
lowerCAmelCase : Optional[int] = copy.deepcopy(snake_case__ )
lowerCAmelCase : List[str] = False
lowerCAmelCase : int = model_class(snake_case__ )
model.to(snake_case__ )
model.eval()
lowerCAmelCase : Optional[Any] = model(**snake_case__ )
| 681 | 1 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {
'''microsoft/markuplm-base''': '''https://huggingface.co/microsoft/markuplm-base/resolve/main/config.json''',
'''microsoft/markuplm-large''': '''https://huggingface.co/microsoft/markuplm-large/resolve/main/config.json''',
}
class SCREAMING_SNAKE_CASE__ ( lowercase ):
"""simple docstring"""
a : int ="markuplm"
def __init__( self , snake_case__=30_522 , snake_case__=768 , snake_case__=12 , snake_case__=12 , snake_case__=3_072 , snake_case__="gelu" , snake_case__=0.1 , snake_case__=0.1 , snake_case__=512 , snake_case__=2 , snake_case__=0.02 , snake_case__=1e-12 , snake_case__=0 , snake_case__=0 , snake_case__=2 , snake_case__=256 , snake_case__=1_024 , snake_case__=216 , snake_case__=1_001 , snake_case__=32 , snake_case__=50 , snake_case__="absolute" , snake_case__=True , snake_case__=None , **snake_case__ , ):
"""simple docstring"""
super().__init__(
pad_token_id=snake_case__ , bos_token_id=snake_case__ , eos_token_id=snake_case__ , **snake_case__ , )
lowerCAmelCase : Any = vocab_size
lowerCAmelCase : Optional[Any] = hidden_size
lowerCAmelCase : int = num_hidden_layers
lowerCAmelCase : Optional[Any] = num_attention_heads
lowerCAmelCase : List[Any] = hidden_act
lowerCAmelCase : Dict = intermediate_size
lowerCAmelCase : Optional[int] = hidden_dropout_prob
lowerCAmelCase : Optional[int] = attention_probs_dropout_prob
lowerCAmelCase : str = max_position_embeddings
lowerCAmelCase : List[str] = type_vocab_size
lowerCAmelCase : str = initializer_range
lowerCAmelCase : int = layer_norm_eps
lowerCAmelCase : Optional[int] = position_embedding_type
lowerCAmelCase : List[Any] = use_cache
lowerCAmelCase : str = classifier_dropout
# additional properties
lowerCAmelCase : str = max_depth
lowerCAmelCase : Union[str, Any] = max_xpath_tag_unit_embeddings
lowerCAmelCase : List[Any] = max_xpath_subs_unit_embeddings
lowerCAmelCase : int = tag_pad_id
lowerCAmelCase : Union[str, Any] = subs_pad_id
lowerCAmelCase : List[str] = xpath_unit_hidden_size
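# Descriptive note (added, not in the original file): the MarkupLM-specific fields
# above size the XPath embeddings. Each DOM node's XPath is encoded as up to
# `max_depth` (tag, subscript) pairs, drawn from vocabularies of
# `max_xpath_tag_unit_embeddings` tag units and `max_xpath_subs_unit_embeddings`
# subscript units, each embedded into a vector of size `xpath_unit_hidden_size`.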
| 681 |
"""simple docstring"""
import argparse
import os
import re
lowerCAmelCase__ = '''src/transformers'''
# Pattern that looks at the indentation in a line.
lowerCAmelCase__ = re.compile(r'''^(\s*)\S''')
# Pattern that matches `"key":` and puts `key` in group 0.
lowerCAmelCase__ = re.compile(r'''^\s*"([^"]+)":''')
# Pattern that matches `_import_structure["key"]` and puts `key` in group 0.
lowerCAmelCase__ = re.compile(r'''^\s*_import_structure\["([^"]+)"\]''')
# Pattern that matches `"key",` and puts `key` in group 0.
lowerCAmelCase__ = re.compile(r'''^\s*"([^"]+)",\s*$''')
# Pattern that matches any `[stuff]` and puts `stuff` in group 0.
lowerCAmelCase__ = re.compile(r'''\[([^\]]+)\]''')
def a__ ( SCREAMING_SNAKE_CASE : List[str] ):
'''simple docstring'''
lowerCAmelCase : Union[str, Any] = _re_indent.search(SCREAMING_SNAKE_CASE )
return "" if search is None else search.groups()[0]
def a__ ( SCREAMING_SNAKE_CASE : List[Any] , SCREAMING_SNAKE_CASE : Optional[int]="" , SCREAMING_SNAKE_CASE : Optional[int]=None , SCREAMING_SNAKE_CASE : Dict=None ):
'''simple docstring'''
lowerCAmelCase : Tuple = 0
lowerCAmelCase : Optional[int] = code.split("\n" )
if start_prompt is not None:
while not lines[index].startswith(SCREAMING_SNAKE_CASE ):
index += 1
lowerCAmelCase : Dict = ["\n".join(lines[:index] )]
else:
lowerCAmelCase : Any = []
# We split into blocks until we get to the `end_prompt` (or the end of the block).
lowerCAmelCase : Union[str, Any] = [lines[index]]
index += 1
while index < len(SCREAMING_SNAKE_CASE ) and (end_prompt is None or not lines[index].startswith(SCREAMING_SNAKE_CASE )):
if len(lines[index] ) > 0 and get_indent(lines[index] ) == indent_level:
if len(SCREAMING_SNAKE_CASE ) > 0 and get_indent(current_block[-1] ).startswith(indent_level + " " ):
current_block.append(lines[index] )
blocks.append("\n".join(SCREAMING_SNAKE_CASE ) )
if index < len(SCREAMING_SNAKE_CASE ) - 1:
lowerCAmelCase : List[str] = [lines[index + 1]]
index += 1
else:
lowerCAmelCase : Optional[Any] = []
else:
blocks.append("\n".join(SCREAMING_SNAKE_CASE ) )
lowerCAmelCase : str = [lines[index]]
else:
current_block.append(lines[index] )
index += 1
# Adds current block if it's nonempty.
if len(SCREAMING_SNAKE_CASE ) > 0:
blocks.append("\n".join(SCREAMING_SNAKE_CASE ) )
# Add final block after end_prompt if provided.
if end_prompt is not None and index < len(SCREAMING_SNAKE_CASE ):
blocks.append("\n".join(lines[index:] ) )
return blocks
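# Behaviour sketch (descriptive comment, not in the original file): a line at
# `indent_level` normally closes the current block and starts a new one, but if it
# directly follows deeper-indented lines it is folded into the current block as
# its closing line, matching import structures that end with a dedented `]` or `}`.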
def a__ ( SCREAMING_SNAKE_CASE : int ):
'''simple docstring'''
def _inner(SCREAMING_SNAKE_CASE : Optional[Any] ):
return key(SCREAMING_SNAKE_CASE ).lower().replace("_" , "" )
return _inner
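# Illustrative example (assuming the identity key): the wrapped key compares names
# case-insensitively and ignores underscores, e.g.
#   ignore_underscore(lambda s: s)("_Foo_BAR") == "foobar"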
def a__ ( SCREAMING_SNAKE_CASE : Optional[int] , SCREAMING_SNAKE_CASE : Dict=None ):
'''simple docstring'''
def noop(SCREAMING_SNAKE_CASE : List[Any] ):
        return SCREAMING_SNAKE_CASE
if key is None:
lowerCAmelCase : int = noop
# Constants are all uppercase, they go first.
lowerCAmelCase : Dict = [obj for obj in objects if key(SCREAMING_SNAKE_CASE ).isupper()]
# Classes are not all uppercase but start with a capital, they go second.
lowerCAmelCase : List[Any] = [obj for obj in objects if key(SCREAMING_SNAKE_CASE )[0].isupper() and not key(SCREAMING_SNAKE_CASE ).isupper()]
# Functions begin with a lowercase, they go last.
lowerCAmelCase : List[Any] = [obj for obj in objects if not key(SCREAMING_SNAKE_CASE )[0].isupper()]
lowerCAmelCase : Dict = ignore_underscore(SCREAMING_SNAKE_CASE )
return sorted(SCREAMING_SNAKE_CASE , key=SCREAMING_SNAKE_CASE ) + sorted(SCREAMING_SNAKE_CASE , key=SCREAMING_SNAKE_CASE ) + sorted(SCREAMING_SNAKE_CASE , key=SCREAMING_SNAKE_CASE )
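# Example of the resulting order (illustrative input):
#   sort_objects(["load_tf_weights", "BertModel", "BERT_PRETRAINED_MAP", "AutoConfig"])
#   -> ["BERT_PRETRAINED_MAP", "AutoConfig", "BertModel", "load_tf_weights"]
# Constants sort first, then classes, then functions, each group ordered
# case-insensitively with underscores ignored.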
def a__ ( SCREAMING_SNAKE_CASE : List[str] ):
'''simple docstring'''
def _replace(SCREAMING_SNAKE_CASE : List[Any] ):
lowerCAmelCase : List[str] = match.groups()[0]
if "," not in imports:
return f"""[{imports}]"""
lowerCAmelCase : Dict = [part.strip().replace("\"" , "" ) for part in imports.split("," )]
# We will have a final empty element if the line finished with a comma.
if len(keys[-1] ) == 0:
lowerCAmelCase : Any = keys[:-1]
return "[" + ", ".join([f"""\"{k}\"""" for k in sort_objects(SCREAMING_SNAKE_CASE )] ) + "]"
lowerCAmelCase : List[Any] = import_statement.split("\n" )
if len(SCREAMING_SNAKE_CASE ) > 3:
# Here we have to sort internal imports that are on several lines (one per name):
# key: [
# "object1",
# "object2",
# ...
# ]
# We may have to ignore one or two lines on each side.
lowerCAmelCase : Tuple = 2 if lines[1].strip() == "[" else 1
lowerCAmelCase : Optional[Any] = [(i, _re_strip_line.search(SCREAMING_SNAKE_CASE ).groups()[0]) for i, line in enumerate(lines[idx:-idx] )]
lowerCAmelCase : Optional[Any] = sort_objects(SCREAMING_SNAKE_CASE , key=lambda SCREAMING_SNAKE_CASE : x[1] )
lowerCAmelCase : List[str] = [lines[x[0] + idx] for x in sorted_indices]
return "\n".join(lines[:idx] + sorted_lines + lines[-idx:] )
elif len(SCREAMING_SNAKE_CASE ) == 3:
# Here we have to sort internal imports that are on one separate line:
# key: [
# "object1", "object2", ...
# ]
if _re_bracket_content.search(lines[1] ) is not None:
lowerCAmelCase : Optional[int] = _re_bracket_content.sub(_replace , lines[1] )
else:
lowerCAmelCase : List[str] = [part.strip().replace("\"" , "" ) for part in lines[1].split("," )]
# We will have a final empty element if the line finished with a comma.
if len(keys[-1] ) == 0:
lowerCAmelCase : Union[str, Any] = keys[:-1]
lowerCAmelCase : str = get_indent(lines[1] ) + ", ".join([f"""\"{k}\"""" for k in sort_objects(SCREAMING_SNAKE_CASE )] )
return "\n".join(SCREAMING_SNAKE_CASE )
else:
# Finally we have to deal with imports fitting on one line
lowerCAmelCase : Any = _re_bracket_content.sub(_replace , SCREAMING_SNAKE_CASE )
return import_statement
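# Illustrative before/after for the single-line case (hypothetical entry):
#   _import_structure["models.bert"] = ["BertModel", "BertConfig"]
# becomes
#   _import_structure["models.bert"] = ["BertConfig", "BertModel"]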
def a__ ( SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : Tuple=True ):
'''simple docstring'''
with open(SCREAMING_SNAKE_CASE , encoding="utf-8" ) as f:
lowerCAmelCase : Union[str, Any] = f.read()
if "_import_structure" not in code:
return
# Blocks of indent level 0
lowerCAmelCase : List[str] = split_code_in_indented_blocks(
SCREAMING_SNAKE_CASE , start_prompt="_import_structure = {" , end_prompt="if TYPE_CHECKING:" )
    # We ignore block 0 (everything until start_prompt) and the last block (everything after end_prompt).
for block_idx in range(1 , len(SCREAMING_SNAKE_CASE ) - 1 ):
# Check if the block contains some `_import_structure`s thingy to sort.
lowerCAmelCase : Tuple = main_blocks[block_idx]
lowerCAmelCase : Optional[Any] = block.split("\n" )
# Get to the start of the imports.
lowerCAmelCase : int = 0
while line_idx < len(SCREAMING_SNAKE_CASE ) and "_import_structure" not in block_lines[line_idx]:
# Skip dummy import blocks
if "import dummy" in block_lines[line_idx]:
lowerCAmelCase : Optional[Any] = len(SCREAMING_SNAKE_CASE )
else:
line_idx += 1
if line_idx >= len(SCREAMING_SNAKE_CASE ):
continue
# Ignore beginning and last line: they don't contain anything.
lowerCAmelCase : Optional[Any] = "\n".join(block_lines[line_idx:-1] )
lowerCAmelCase : Dict = get_indent(block_lines[1] )
        # Split the internal block into blocks of indent level 1.
lowerCAmelCase : Optional[Any] = split_code_in_indented_blocks(SCREAMING_SNAKE_CASE , indent_level=SCREAMING_SNAKE_CASE )
# We have two categories of import key: list or _import_structure[key].append/extend
lowerCAmelCase : Tuple = _re_direct_key if "_import_structure = {" in block_lines[0] else _re_indirect_key
# Grab the keys, but there is a trap: some lines are empty or just comments.
lowerCAmelCase : Tuple = [(pattern.search(SCREAMING_SNAKE_CASE ).groups()[0] if pattern.search(SCREAMING_SNAKE_CASE ) is not None else None) for b in internal_blocks]
# We only sort the lines with a key.
lowerCAmelCase : int = [(i, key) for i, key in enumerate(SCREAMING_SNAKE_CASE ) if key is not None]
lowerCAmelCase : Union[str, Any] = [x[0] for x in sorted(SCREAMING_SNAKE_CASE , key=lambda SCREAMING_SNAKE_CASE : x[1] )]
# We reorder the blocks by leaving empty lines/comments as they were and reorder the rest.
lowerCAmelCase : Tuple = 0
lowerCAmelCase : Any = []
for i in range(len(SCREAMING_SNAKE_CASE ) ):
if keys[i] is None:
reorderded_blocks.append(internal_blocks[i] )
else:
lowerCAmelCase : Dict = sort_objects_in_import(internal_blocks[sorted_indices[count]] )
reorderded_blocks.append(SCREAMING_SNAKE_CASE )
count += 1
# And we put our main block back together with its first and last line.
lowerCAmelCase : List[Any] = "\n".join(block_lines[:line_idx] + reorderded_blocks + [block_lines[-1]] )
if code != "\n".join(SCREAMING_SNAKE_CASE ):
if check_only:
return True
else:
print(f"""Overwriting {file}.""" )
with open(SCREAMING_SNAKE_CASE , "w" , encoding="utf-8" ) as f:
f.write("\n".join(SCREAMING_SNAKE_CASE ) )
def a__ ( SCREAMING_SNAKE_CASE : List[str]=True ):
'''simple docstring'''
lowerCAmelCase : Optional[int] = []
for root, _, files in os.walk(SCREAMING_SNAKE_CASE ):
if "__init__.py" in files:
lowerCAmelCase : Tuple = sort_imports(os.path.join(SCREAMING_SNAKE_CASE , "__init__.py" ) , check_only=SCREAMING_SNAKE_CASE )
if result:
lowerCAmelCase : Optional[Any] = [os.path.join(SCREAMING_SNAKE_CASE , "__init__.py" )]
if len(SCREAMING_SNAKE_CASE ) > 0:
raise ValueError(f"""Would overwrite {len(SCREAMING_SNAKE_CASE )} files, run `make style`.""" )
if __name__ == "__main__":
lowerCAmelCase__ = argparse.ArgumentParser()
parser.add_argument('''--check_only''', action='''store_true''', help='''Whether to only check or fix style.''')
lowerCAmelCase__ = parser.parse_args()
sort_imports_in_all_inits(check_only=args.check_only)
| 681 | 1 |
"""simple docstring"""
import math
def a__ ( SCREAMING_SNAKE_CASE : Optional[int] , SCREAMING_SNAKE_CASE : List[Any] ):
'''simple docstring'''
if 0 not in (x, y):
        # We use the relation log10(x^y) = y*log10(x), where 10 is the base.
return y * math.logaa(SCREAMING_SNAKE_CASE )
else:
if x == 0: # 0 raised to any number is 0
return 0
elif y == 0:
return 1 # any number raised to 0 is 1
raise AssertionError("This should never happen" )
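# Worked example (added for illustration): to compare 2**10 with 3**7, compare
# 10 * log10(2) ~= 3.010 against 7 * log10(3) ~= 3.340; the latter is larger, so
# 3**7 (= 2187) beats 2**10 (= 1024) without computing either power.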
if __name__ == "__main__": # Main function
# Read two numbers from input and typecast them to int using map function.
# Here x is the base and y is the power.
    prompt = '''Enter the base and the power separated by a comma: '''
    xa, ya = map(int, input(prompt).split(''','''))
    xb, yb = map(int, input(prompt).split(''','''))
    # We find the log of each number, using the function res(), which takes two
    # arguments.
    resa = res(xa, ya)
    resb = res(xb, yb)
    # We check for the largest number
    if resa > resb:
        print('''Largest number is''', xa, '''^''', ya)
    elif resb > resa:
        print('''Largest number is''', xb, '''^''', yb)
    else:
        print('''Both are equal''')
| 681 |
"""simple docstring"""
from __future__ import annotations
import inspect
import unittest
import numpy as np
from transformers import ResNetConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFResNetForImageClassification, TFResNetModel
from transformers.models.resnet.modeling_tf_resnet import TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class SCREAMING_SNAKE_CASE__ :
"""simple docstring"""
def __init__( self , snake_case__ , snake_case__=3 , snake_case__=32 , snake_case__=3 , snake_case__=10 , snake_case__=[10, 20, 30, 40] , snake_case__=[1, 1, 2, 1] , snake_case__=True , snake_case__=True , snake_case__="relu" , snake_case__=3 , snake_case__=None , ):
"""simple docstring"""
lowerCAmelCase : Optional[Any] = parent
lowerCAmelCase : List[Any] = batch_size
lowerCAmelCase : Union[str, Any] = image_size
lowerCAmelCase : Dict = num_channels
lowerCAmelCase : List[Any] = embeddings_size
lowerCAmelCase : List[Any] = hidden_sizes
lowerCAmelCase : Optional[int] = depths
lowerCAmelCase : str = is_training
lowerCAmelCase : List[str] = use_labels
lowerCAmelCase : List[Any] = hidden_act
lowerCAmelCase : Optional[Any] = num_labels
lowerCAmelCase : Tuple = scope
lowerCAmelCase : int = len(snake_case__ )
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : Optional[int] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowerCAmelCase : Optional[Any] = None
if self.use_labels:
lowerCAmelCase : Optional[int] = ids_tensor([self.batch_size] , self.num_labels )
lowerCAmelCase : List[str] = self.get_config()
return config, pixel_values, labels
def lowercase__ ( self ):
"""simple docstring"""
return ResNetConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , image_size=self.image_size , )
def lowercase__ ( self , snake_case__ , snake_case__ , snake_case__ ):
"""simple docstring"""
lowerCAmelCase : Tuple = TFResNetModel(config=snake_case__ )
lowerCAmelCase : Union[str, Any] = model(snake_case__ )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def lowercase__ ( self , snake_case__ , snake_case__ , snake_case__ ):
"""simple docstring"""
lowerCAmelCase : Dict = self.num_labels
lowerCAmelCase : str = TFResNetForImageClassification(snake_case__ )
lowerCAmelCase : int = model(snake_case__ , labels=snake_case__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : Union[str, Any] = self.prepare_config_and_inputs()
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase : Any = config_and_inputs
lowerCAmelCase : Optional[Any] = {"pixel_values": pixel_values}
return config, inputs_dict
@require_tf
class SCREAMING_SNAKE_CASE__ ( lowercase , lowercase , unittest.TestCase ):
"""simple docstring"""
a : Any =(TFResNetModel, TFResNetForImageClassification) if is_tf_available() else ()
a : Tuple =(
{"feature-extraction": TFResNetModel, "image-classification": TFResNetForImageClassification}
if is_tf_available()
else {}
)
a : int =False
a : List[str] =False
a : Optional[int] =False
a : Union[str, Any] =False
a : Any =False
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : Optional[Any] = TFResNetModelTester(self )
lowerCAmelCase : str = ConfigTester(self , config_class=snake_case__ , has_text_modality=snake_case__ )
def lowercase__ ( self ):
"""simple docstring"""
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def lowercase__ ( self ):
"""simple docstring"""
return
@unittest.skip(reason="ResNet does not use inputs_embeds" )
def lowercase__ ( self ):
"""simple docstring"""
pass
@unittest.skip(reason="ResNet does not support input and output embeddings" )
def lowercase__ ( self ):
"""simple docstring"""
pass
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase , lowerCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCAmelCase : List[str] = model_class(snake_case__ )
lowerCAmelCase : Optional[Any] = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCAmelCase : Dict = [*signature.parameters.keys()]
lowerCAmelCase : List[Any] = ["pixel_values"]
self.assertListEqual(arg_names[:1] , snake_case__ )
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*snake_case__ )
def lowercase__ ( self ):
"""simple docstring"""
def check_hidden_states_output(snake_case__ , snake_case__ , snake_case__ ):
lowerCAmelCase : int = model_class(snake_case__ )
lowerCAmelCase : Optional[Any] = model(**self._prepare_for_class(snake_case__ , snake_case__ ) )
lowerCAmelCase : List[str] = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
lowerCAmelCase : Tuple = self.model_tester.num_stages
self.assertEqual(len(snake_case__ ) , expected_num_stages + 1 )
# ResNet's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
lowerCAmelCase , lowerCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
lowerCAmelCase : Any = ["basic", "bottleneck"]
for model_class in self.all_model_classes:
for layer_type in layers_type:
lowerCAmelCase : Optional[Any] = layer_type
lowerCAmelCase : Dict = True
check_hidden_states_output(snake_case__ , snake_case__ , snake_case__ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowerCAmelCase : List[Any] = True
check_hidden_states_output(snake_case__ , snake_case__ , snake_case__ )
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*snake_case__ )
@slow
def lowercase__ ( self ):
"""simple docstring"""
for model_name in TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCAmelCase : int = TFResNetModel.from_pretrained(snake_case__ )
self.assertIsNotNone(snake_case__ )
def a__ ( ):
'''simple docstring'''
lowerCAmelCase : str = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_tf
@require_vision
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def lowercase__ ( self ):
"""simple docstring"""
return (
AutoImageProcessor.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
if is_vision_available()
else None
)
@slow
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : int = TFResNetForImageClassification.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
lowerCAmelCase : Any = self.default_image_processor
lowerCAmelCase : Optional[Any] = prepare_img()
lowerCAmelCase : Dict = image_processor(images=snake_case__ , return_tensors="tf" )
# forward pass
lowerCAmelCase : str = model(**snake_case__ )
# verify the logits
lowerCAmelCase : str = tf.TensorShape((1, 1_000) )
self.assertEqual(outputs.logits.shape , snake_case__ )
lowerCAmelCase : str = tf.constant([-11.1069, -9.7877, -8.3777] )
self.assertTrue(np.allclose(outputs.logits[0, :3].numpy() , snake_case__ , atol=1e-4 ) )
| 681 | 1 |
"""simple docstring"""
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {'''tokenizer_file''': '''tokenizer.json'''}
lowerCAmelCase__ = {
'''tokenizer_file''': {
'''bigscience/tokenizer''': '''https://huggingface.co/bigscience/tokenizer/blob/main/tokenizer.json''',
'''bigscience/bloom-560m''': '''https://huggingface.co/bigscience/bloom-560m/blob/main/tokenizer.json''',
'''bigscience/bloom-1b1''': '''https://huggingface.co/bigscience/bloom-1b1/blob/main/tokenizer.json''',
'''bigscience/bloom-1b7''': '''https://huggingface.co/bigscience/bloom-1b7/blob/main/tokenizer.json''',
'''bigscience/bloom-3b''': '''https://huggingface.co/bigscience/bloom-3b/blob/main/tokenizer.json''',
'''bigscience/bloom-7b1''': '''https://huggingface.co/bigscience/bloom-7b1/blob/main/tokenizer.json''',
'''bigscience/bloom''': '''https://huggingface.co/bigscience/bloom/blob/main/tokenizer.json''',
},
}
class SCREAMING_SNAKE_CASE__ ( lowercase ):
"""simple docstring"""
a : Union[str, Any] =VOCAB_FILES_NAMES
a : Union[str, Any] =PRETRAINED_VOCAB_FILES_MAP
a : Any =["input_ids", "attention_mask"]
a : Any =None
def __init__( self , snake_case__=None , snake_case__=None , snake_case__=None , snake_case__="<unk>" , snake_case__="<s>" , snake_case__="</s>" , snake_case__="<pad>" , snake_case__=False , snake_case__=False , **snake_case__ , ):
"""simple docstring"""
super().__init__(
snake_case__ , snake_case__ , tokenizer_file=snake_case__ , unk_token=snake_case__ , bos_token=snake_case__ , eos_token=snake_case__ , pad_token=snake_case__ , add_prefix_space=snake_case__ , clean_up_tokenization_spaces=snake_case__ , **snake_case__ , )
lowerCAmelCase : Dict = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get("add_prefix_space" , snake_case__ ) != add_prefix_space:
lowerCAmelCase : Optional[Any] = getattr(snake_case__ , pre_tok_state.pop("type" ) )
lowerCAmelCase : Tuple = add_prefix_space
lowerCAmelCase : List[str] = pre_tok_class(**snake_case__ )
lowerCAmelCase : Optional[Any] = add_prefix_space
def lowercase__ ( self , *snake_case__ , **snake_case__ ):
"""simple docstring"""
lowerCAmelCase : List[str] = kwargs.get("is_split_into_words" , snake_case__ )
if not (self.add_prefix_space or not is_split_into_words):
raise Exception(
f"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with"""
" pretokenized inputs." )
return super()._batch_encode_plus(*snake_case__ , **snake_case__ )
def lowercase__ ( self , *snake_case__ , **snake_case__ ):
"""simple docstring"""
lowerCAmelCase : str = kwargs.get("is_split_into_words" , snake_case__ )
if not (self.add_prefix_space or not is_split_into_words):
raise Exception(
f"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with"""
" pretokenized inputs." )
return super()._encode_plus(*snake_case__ , **snake_case__ )
def lowercase__ ( self , snake_case__ , snake_case__ = None ):
"""simple docstring"""
lowerCAmelCase : List[str] = self._tokenizer.model.save(snake_case__ , name=snake_case__ )
return tuple(snake_case__ )
def lowercase__ ( self , snake_case__ ):
"""simple docstring"""
lowerCAmelCase : str = []
for is_user, text in conversation.iter_texts():
input_ids.extend(self.encode(snake_case__ , add_special_tokens=snake_case__ ) + [self.eos_token_id] )
if len(snake_case__ ) > self.model_max_length:
lowerCAmelCase : Union[str, Any] = input_ids[-self.model_max_length :]
return input_ids
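# Minimal usage sketch (illustrative; uses the upstream class name rather than the
# obfuscated one above):
#   tokenizer = BloomTokenizerFast.from_pretrained("bigscience/bloom-560m")
#   input_ids = tokenizer("Hello world")["input_ids"]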
| 681 |
"""simple docstring"""
import random
from .binary_exp_mod import bin_exp_mod
def a__ ( SCREAMING_SNAKE_CASE : Any , SCREAMING_SNAKE_CASE : int=1_0_0_0 ):
'''simple docstring'''
if n < 2:
return False
if n % 2 == 0:
return n == 2
# this means n is odd
lowerCAmelCase : int = n - 1
lowerCAmelCase : Optional[int] = 0
while d % 2 == 0:
        d //= 2  # integer division keeps d an int for the modular exponentiation
exp += 1
    # n - 1 = d * (2**exp)
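    # Illustrative decomposition (added for clarity): for n = 13,
    # n - 1 = 12 = 3 * 2**2, so d = 3 and exp = 2.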
lowerCAmelCase : Optional[Any] = 0
while count < prec:
lowerCAmelCase : List[str] = random.randint(2 , n - 1 )
lowerCAmelCase : Tuple = bin_exp_mod(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
if b != 1:
lowerCAmelCase : List[str] = True
for _ in range(SCREAMING_SNAKE_CASE ):
if b == n - 1:
lowerCAmelCase : List[str] = False
break
lowerCAmelCase : Optional[Any] = b * b
b %= n
if flag:
return False
count += 1
return True
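# Illustrative expectations (added): is_prime_big(97) -> True and
# is_prime_big(100) -> False. Miller-Rabin's error is one-sided -- it can only
# misreport a composite as prime -- so a prime input is always accepted.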
if __name__ == "__main__":
lowerCAmelCase__ = abs(int(input('''Enter bound : ''').strip()))
print('''Here\'s the list of primes:''')
print(''', '''.join(str(i) for i in range(n + 1) if is_prime_big(i)))
| 681 | 1 |
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class SCREAMING_SNAKE_CASE__ ( metaclass=lowercase ):
"""simple docstring"""
a : List[str] =["torch", "scipy"]
def __init__( self , *snake_case__ , **snake_case__ ):
"""simple docstring"""
requires_backends(self , ["torch", "scipy"] )
@classmethod
def lowercase__ ( cls , *snake_case__ , **snake_case__ ):
"""simple docstring"""
requires_backends(cls , ["torch", "scipy"] )
@classmethod
def lowercase__ ( cls , *snake_case__ , **snake_case__ ):
"""simple docstring"""
requires_backends(cls , ["torch", "scipy"] )
| 681 |
"""simple docstring"""
# DISCLAIMER: This file is strongly influenced by https://github.com/ermongroup/ddim
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import flax
import jax
import jax.numpy as jnp
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils_flax import (
CommonSchedulerState,
FlaxKarrasDiffusionSchedulers,
FlaxSchedulerMixin,
FlaxSchedulerOutput,
add_noise_common,
get_velocity_common,
)
@flax.struct.dataclass
class SCREAMING_SNAKE_CASE__ :
"""simple docstring"""
a : CommonSchedulerState
# setable values
a : jnp.ndarray
a : jnp.ndarray
a : Optional[int] =None
@classmethod
def lowercase__ ( cls , snake_case__ , snake_case__ , snake_case__ ):
"""simple docstring"""
return cls(common=snake_case__ , init_noise_sigma=snake_case__ , timesteps=snake_case__ )
@dataclass
class SCREAMING_SNAKE_CASE__ ( lowercase ):
"""simple docstring"""
a : DDPMSchedulerState
class SCREAMING_SNAKE_CASE__ ( lowercase , lowercase ):
"""simple docstring"""
a : Union[str, Any] =[e.name for e in FlaxKarrasDiffusionSchedulers]
a : jnp.dtype
@property
def lowercase__ ( self ):
"""simple docstring"""
return True
@register_to_config
def __init__( self , snake_case__ = 1_000 , snake_case__ = 0.0001 , snake_case__ = 0.02 , snake_case__ = "linear" , snake_case__ = None , snake_case__ = "fixed_small" , snake_case__ = True , snake_case__ = "epsilon" , snake_case__ = jnp.floataa , ):
"""simple docstring"""
lowerCAmelCase : Any = dtype
def lowercase__ ( self , snake_case__ = None ):
"""simple docstring"""
if common is None:
lowerCAmelCase : Dict = CommonSchedulerState.create(self )
# standard deviation of the initial noise distribution
lowerCAmelCase : str = jnp.array(1.0 , dtype=self.dtype )
lowerCAmelCase : Any = jnp.arange(0 , self.config.num_train_timesteps ).round()[::-1]
return DDPMSchedulerState.create(
common=snake_case__ , init_noise_sigma=snake_case__ , timesteps=snake_case__ , )
def lowercase__ ( self , snake_case__ , snake_case__ , snake_case__ = None ):
"""simple docstring"""
return sample
def lowercase__ ( self , snake_case__ , snake_case__ , snake_case__ = () ):
"""simple docstring"""
lowerCAmelCase : List[Any] = self.config.num_train_timesteps // num_inference_steps
# creates integer timesteps by multiplying by ratio
# rounding to avoid issues when num_inference_step is power of 3
lowerCAmelCase : Any = (jnp.arange(0 , snake_case__ ) * step_ratio).round()[::-1]
return state.replace(
num_inference_steps=snake_case__ , timesteps=snake_case__ , )
def lowercase__ ( self , snake_case__ , snake_case__ , snake_case__=None , snake_case__=None ):
"""simple docstring"""
lowerCAmelCase : Optional[Any] = state.common.alphas_cumprod[t]
lowerCAmelCase : List[str] = jnp.where(t > 0 , state.common.alphas_cumprod[t - 1] , jnp.array(1.0 , dtype=self.dtype ) )
# For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf)
# and sample from it to get previous sample
# x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample
lowerCAmelCase : Union[str, Any] = (1 - alpha_prod_t_prev) / (1 - alpha_prod_t) * state.common.betas[t]
if variance_type is None:
lowerCAmelCase : List[str] = self.config.variance_type
# hacks - were probably added for training stability
if variance_type == "fixed_small":
lowerCAmelCase : List[Any] = jnp.clip(snake_case__ , a_min=1e-20 )
# for rl-diffuser https://arxiv.org/abs/2205.09991
elif variance_type == "fixed_small_log":
lowerCAmelCase : List[str] = jnp.log(jnp.clip(snake_case__ , a_min=1e-20 ) )
elif variance_type == "fixed_large":
lowerCAmelCase : Optional[int] = state.common.betas[t]
elif variance_type == "fixed_large_log":
# Glide max_log
lowerCAmelCase : List[str] = jnp.log(state.common.betas[t] )
elif variance_type == "learned":
return predicted_variance
elif variance_type == "learned_range":
lowerCAmelCase : List[str] = variance
lowerCAmelCase : Dict = state.common.betas[t]
lowerCAmelCase : Optional[Any] = (predicted_variance + 1) / 2
lowerCAmelCase : List[str] = frac * max_log + (1 - frac) * min_log
return variance
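    # Descriptive note (added): the variance computed above is the DDPM posterior
    # variance beta_tilde_t = (1 - alpha_bar_{t-1}) / (1 - alpha_bar_t) * beta_t
    # (formula (7) of Ho et al., 2020); "fixed_small" merely clips it at 1e-20
    # for numerical stability.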
def lowercase__ ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ = None , snake_case__ = True , ):
"""simple docstring"""
lowerCAmelCase : Optional[Any] = timestep
if key is None:
lowerCAmelCase : Tuple = jax.random.PRNGKey(0 )
if model_output.shape[1] == sample.shape[1] * 2 and self.config.variance_type in ["learned", "learned_range"]:
lowerCAmelCase , lowerCAmelCase : Optional[Any] = jnp.split(snake_case__ , sample.shape[1] , axis=1 )
else:
lowerCAmelCase : Tuple = None
# 1. compute alphas, betas
lowerCAmelCase : Optional[int] = state.common.alphas_cumprod[t]
lowerCAmelCase : Optional[int] = jnp.where(t > 0 , state.common.alphas_cumprod[t - 1] , jnp.array(1.0 , dtype=self.dtype ) )
lowerCAmelCase : Dict = 1 - alpha_prod_t
lowerCAmelCase : Any = 1 - alpha_prod_t_prev
# 2. compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
if self.config.prediction_type == "epsilon":
lowerCAmelCase : Union[str, Any] = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
elif self.config.prediction_type == "sample":
lowerCAmelCase : List[Any] = model_output
elif self.config.prediction_type == "v_prediction":
lowerCAmelCase : Tuple = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output
else:
raise ValueError(
f"""prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample` """
" for the FlaxDDPMScheduler." )
# 3. Clip "predicted x_0"
if self.config.clip_sample:
lowerCAmelCase : Optional[int] = jnp.clip(snake_case__ , -1 , 1 )
# 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
lowerCAmelCase : List[Any] = (alpha_prod_t_prev ** 0.5 * state.common.betas[t]) / beta_prod_t
lowerCAmelCase : List[str] = state.common.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t
# 5. Compute predicted previous sample µ_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
lowerCAmelCase : int = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample
# 6. Add noise
def random_variance():
lowerCAmelCase : Tuple = jax.random.split(snake_case__ , num=1 )
lowerCAmelCase : str = jax.random.normal(snake_case__ , shape=model_output.shape , dtype=self.dtype )
return (self._get_variance(snake_case__ , snake_case__ , predicted_variance=snake_case__ ) ** 0.5) * noise
lowerCAmelCase : Union[str, Any] = jnp.where(t > 0 , random_variance() , jnp.zeros(model_output.shape , dtype=self.dtype ) )
lowerCAmelCase : Union[str, Any] = pred_prev_sample + variance
if not return_dict:
return (pred_prev_sample, state)
return FlaxDDPMSchedulerOutput(prev_sample=snake_case__ , state=snake_case__ )
def lowercase__ ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , ):
"""simple docstring"""
return add_noise_common(state.common , snake_case__ , snake_case__ , snake_case__ )
def lowercase__ ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , ):
"""simple docstring"""
return get_velocity_common(state.common , snake_case__ , snake_case__ , snake_case__ )
def __len__( self ):
"""simple docstring"""
return self.config.num_train_timesteps
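# Minimal usage sketch (illustrative; names follow the public diffusers Flax API
# rather than the obfuscated ones above):
#   scheduler = FlaxDDPMScheduler(num_train_timesteps=1000)
#   state = scheduler.create_state()
#   state = scheduler.set_timesteps(state, num_inference_steps=50)
#   output = scheduler.step(state, model_output, t, sample, key=rng)
#   sample, state = output.prev_sample, output.state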
| 681 | 1 |
"""simple docstring"""
import inspect
import unittest
import numpy as np
from tests.test_modeling_common import floats_tensor
from transformers import MaskaFormerConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaskaFormerForUniversalSegmentation, MaskaFormerModel
if is_vision_available():
from transformers import MaskaFormerImageProcessor
if is_vision_available():
from PIL import Image
class SCREAMING_SNAKE_CASE__ :
"""simple docstring"""
def __init__( self , snake_case__ , snake_case__=2 , snake_case__=True , snake_case__=False , snake_case__=10 , snake_case__=3 , snake_case__=32 * 8 , snake_case__=32 * 8 , snake_case__=4 , snake_case__=64 , ):
"""simple docstring"""
lowerCAmelCase : Tuple = parent
lowerCAmelCase : List[str] = batch_size
lowerCAmelCase : List[str] = is_training
lowerCAmelCase : str = use_auxiliary_loss
lowerCAmelCase : Any = num_queries
lowerCAmelCase : Union[str, Any] = num_channels
lowerCAmelCase : Union[str, Any] = min_size
lowerCAmelCase : Tuple = max_size
lowerCAmelCase : int = num_labels
lowerCAmelCase : List[Any] = hidden_dim
lowerCAmelCase : List[str] = hidden_dim
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : List[Any] = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size] ).to(
snake_case__ )
lowerCAmelCase : Any = torch.ones([self.batch_size, self.min_size, self.max_size] , device=snake_case__ )
lowerCAmelCase : Any = (
torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size] , device=snake_case__ ) > 0.5
).float()
lowerCAmelCase : Union[str, Any] = (torch.rand((self.batch_size, self.num_labels) , device=snake_case__ ) > 0.5).long()
lowerCAmelCase : int = self.get_config()
return config, pixel_values, pixel_mask, mask_labels, class_labels
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : int = MaskaFormerConfig(
hidden_size=self.hidden_dim , )
lowerCAmelCase : Optional[Any] = self.num_queries
lowerCAmelCase : Any = self.num_labels
lowerCAmelCase : Tuple = [1, 1, 1, 1]
lowerCAmelCase : Optional[int] = self.num_channels
lowerCAmelCase : Tuple = 64
lowerCAmelCase : Tuple = 128
lowerCAmelCase : Optional[int] = self.hidden_dim
lowerCAmelCase : Dict = self.hidden_dim
lowerCAmelCase : Dict = self.hidden_dim
return config
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase : Dict = self.prepare_config_and_inputs()
lowerCAmelCase : Union[str, Any] = {"pixel_values": pixel_values, "pixel_mask": pixel_mask}
return config, inputs_dict
def lowercase__ ( self , snake_case__ , snake_case__ ):
"""simple docstring"""
lowerCAmelCase : Optional[Any] = output.encoder_hidden_states
lowerCAmelCase : int = output.pixel_decoder_hidden_states
lowerCAmelCase : str = output.transformer_decoder_hidden_states
self.parent.assertTrue(len(snake_case__ ) , len(config.backbone_config.depths ) )
self.parent.assertTrue(len(snake_case__ ) , len(config.backbone_config.depths ) )
self.parent.assertTrue(len(snake_case__ ) , config.decoder_layers )
def lowercase__ ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__=False ):
"""simple docstring"""
with torch.no_grad():
lowerCAmelCase : int = MaskaFormerModel(config=snake_case__ )
model.to(snake_case__ )
model.eval()
lowerCAmelCase : Dict = model(pixel_values=snake_case__ , pixel_mask=snake_case__ )
lowerCAmelCase : List[str] = model(snake_case__ , output_hidden_states=snake_case__ )
self.parent.assertEqual(
output.transformer_decoder_last_hidden_state.shape , (self.batch_size, self.num_queries, self.hidden_dim) , )
        # let's ensure the other two hidden states exist
self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(output.encoder_last_hidden_state is not None )
if output_hidden_states:
self.check_output_hidden_state(snake_case__ , snake_case__ )
def lowercase__ ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ ):
"""simple docstring"""
lowerCAmelCase : List[Any] = MaskaFormerForUniversalSegmentation(config=snake_case__ )
model.to(snake_case__ )
model.eval()
def comm_check_on_output(snake_case__ ):
# let's still check that all the required stuff is there
self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.encoder_last_hidden_state is not None )
# okay, now we need to check the logits shape
# due to the encoder compression, masks have a //4 spatial size
self.parent.assertEqual(
result.masks_queries_logits.shape , (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) , )
# + 1 for null class
self.parent.assertEqual(
result.class_queries_logits.shape , (self.batch_size, self.num_queries, self.num_labels + 1) )
with torch.no_grad():
lowerCAmelCase : Optional[Any] = model(pixel_values=snake_case__ , pixel_mask=snake_case__ )
lowerCAmelCase : List[str] = model(snake_case__ )
comm_check_on_output(snake_case__ )
lowerCAmelCase : str = model(
pixel_values=snake_case__ , pixel_mask=snake_case__ , mask_labels=snake_case__ , class_labels=snake_case__ )
comm_check_on_output(snake_case__ )
self.parent.assertTrue(result.loss is not None )
self.parent.assertEqual(result.loss.shape , torch.Size([1] ) )
@require_torch
class SCREAMING_SNAKE_CASE__ ( lowercase , lowercase , unittest.TestCase ):
"""simple docstring"""
a : Union[str, Any] =(MaskaFormerModel, MaskaFormerForUniversalSegmentation) if is_torch_available() else ()
a : str ={"feature-extraction": MaskaFormerModel} if is_torch_available() else {}
a : Dict =False
a : str =False
a : Dict =False
a : int =False
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : List[str] = MaskaFormerModelTester(self )
lowerCAmelCase : Optional[int] = ConfigTester(self , config_class=snake_case__ , has_text_modality=snake_case__ )
def lowercase__ ( self ):
"""simple docstring"""
self.config_tester.run_common_tests()
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase , lowerCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskaformer_model(snake_case__ , **snake_case__ , output_hidden_states=snake_case__ )
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_maskaformer_instance_segmentation_head_model(*snake_case__ )
@unittest.skip(reason="Mask2Former does not use inputs_embeds" )
def lowercase__ ( self ):
"""simple docstring"""
pass
@unittest.skip(reason="Mask2Former does not have a get_input_embeddings method" )
def lowercase__ ( self ):
"""simple docstring"""
pass
@unittest.skip(reason="Mask2Former is not a generative model" )
def lowercase__ ( self ):
"""simple docstring"""
pass
@unittest.skip(reason="Mask2Former does not use token embeddings" )
def lowercase__ ( self ):
"""simple docstring"""
pass
@require_torch_multi_gpu
@unittest.skip(
reason="Mask2Former has some layers using `add_module` which doesn't work well with `nn.DataParallel`" )
def lowercase__ ( self ):
"""simple docstring"""
pass
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
def lowercase__ ( self ):
"""simple docstring"""
pass
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase , lowerCAmelCase : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCAmelCase : Optional[int] = model_class(snake_case__ )
lowerCAmelCase : Union[str, Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCAmelCase : Optional[int] = [*signature.parameters.keys()]
lowerCAmelCase : Union[str, Any] = ["pixel_values"]
self.assertListEqual(arg_names[:1] , snake_case__ )
@slow
def lowercase__ ( self ):
"""simple docstring"""
for model_name in ["facebook/mask2former-swin-small-coco-instance"]:
lowerCAmelCase : Union[str, Any] = MaskaFormerModel.from_pretrained(snake_case__ )
self.assertIsNotNone(snake_case__ )
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : Union[str, Any] = (self.model_tester.min_size,) * 2
lowerCAmelCase : Optional[int] = {
"pixel_values": torch.randn((2, 3, *size) , device=snake_case__ ),
"mask_labels": torch.randn((2, 10, *size) , device=snake_case__ ),
"class_labels": torch.zeros(2 , 10 , device=snake_case__ ).long(),
}
lowerCAmelCase : List[Any] = self.model_tester.get_config()
lowerCAmelCase : List[Any] = MaskaFormerForUniversalSegmentation(snake_case__ ).to(snake_case__ )
lowerCAmelCase : Union[str, Any] = model(**snake_case__ )
self.assertTrue(outputs.loss is not None )
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase , lowerCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskaformer_model(snake_case__ , **snake_case__ , output_hidden_states=snake_case__ )
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase , lowerCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCAmelCase : int = model_class(snake_case__ ).to(snake_case__ )
lowerCAmelCase : Tuple = model(**snake_case__ , output_attentions=snake_case__ )
self.assertTrue(outputs.attentions is not None )
def lowercase__ ( self ):
"""simple docstring"""
if not self.model_tester.is_training:
return
lowerCAmelCase : Tuple = self.all_model_classes[1]
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
lowerCAmelCase : Optional[int] = model_class(snake_case__ )
model.to(snake_case__ )
model.train()
lowerCAmelCase : List[str] = model(snake_case__ , mask_labels=snake_case__ , class_labels=snake_case__ ).loss
loss.backward()
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : Optional[Any] = self.all_model_classes[1]
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
lowerCAmelCase : Dict = True
lowerCAmelCase : Tuple = True
lowerCAmelCase : Union[str, Any] = model_class(snake_case__ ).to(snake_case__ )
model.train()
lowerCAmelCase : Optional[Any] = model(snake_case__ , mask_labels=snake_case__ , class_labels=snake_case__ )
lowerCAmelCase : Any = outputs.encoder_hidden_states[0]
encoder_hidden_states.retain_grad()
lowerCAmelCase : Optional[Any] = outputs.pixel_decoder_hidden_states[0]
pixel_decoder_hidden_states.retain_grad()
lowerCAmelCase : str = outputs.transformer_decoder_hidden_states[0]
transformer_decoder_hidden_states.retain_grad()
lowerCAmelCase : int = outputs.attentions[0]
attentions.retain_grad()
outputs.loss.backward(retain_graph=snake_case__ )
self.assertIsNotNone(encoder_hidden_states.grad )
self.assertIsNotNone(pixel_decoder_hidden_states.grad )
self.assertIsNotNone(transformer_decoder_hidden_states.grad )
self.assertIsNotNone(attentions.grad )
lowerCAmelCase__ = 1e-4
def a__ ( ):
'''simple docstring'''
lowerCAmelCase : Optional[Any] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_vision
@slow
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def lowercase__ ( self ):
"""simple docstring"""
return "facebook/mask2former-swin-small-coco-instance"
@cached_property
def lowercase__ ( self ):
"""simple docstring"""
return MaskaFormerImageProcessor.from_pretrained(self.model_checkpoints ) if is_vision_available() else None
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : List[Any] = MaskaFormerModel.from_pretrained(self.model_checkpoints ).to(snake_case__ )
lowerCAmelCase : List[Any] = self.default_image_processor
lowerCAmelCase : Tuple = prepare_img()
lowerCAmelCase : Optional[Any] = image_processor(snake_case__ , return_tensors="pt" ).to(snake_case__ )
lowerCAmelCase : List[str] = inputs["pixel_values"].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(snake_case__ , (1, 3, 384, 384) )
with torch.no_grad():
lowerCAmelCase : Optional[int] = model(**snake_case__ )
lowerCAmelCase : Union[str, Any] = torch.tensor(
[[-0.2790, -1.0717, -1.1668], [-0.5128, -0.3128, -0.4987], [-0.5832, 0.1971, -0.0197]] ).to(snake_case__ )
self.assertTrue(
torch.allclose(
outputs.encoder_last_hidden_state[0, 0, :3, :3] , snake_case__ , atol=snake_case__ ) )
lowerCAmelCase : Optional[int] = torch.tensor(
[[0.8973, 1.1847, 1.1776], [1.1934, 1.5040, 1.5128], [1.1153, 1.4486, 1.4951]] ).to(snake_case__ )
self.assertTrue(
torch.allclose(
outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] , snake_case__ , atol=snake_case__ ) )
lowerCAmelCase : str = torch.tensor(
[[2.1152, 1.7000, -0.8603], [1.5808, 1.8004, -0.9353], [1.6043, 1.7495, -0.5999]] ).to(snake_case__ )
self.assertTrue(
torch.allclose(
outputs.transformer_decoder_last_hidden_state[0, :3, :3] , snake_case__ , atol=snake_case__ ) )
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : Optional[int] = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints ).to(snake_case__ ).eval()
lowerCAmelCase : Dict = self.default_image_processor
lowerCAmelCase : Optional[int] = prepare_img()
lowerCAmelCase : List[str] = image_processor(snake_case__ , return_tensors="pt" ).to(snake_case__ )
lowerCAmelCase : Union[str, Any] = inputs["pixel_values"].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(snake_case__ , (1, 3, 384, 384) )
with torch.no_grad():
lowerCAmelCase : Union[str, Any] = model(**snake_case__ )
# masks_queries_logits
lowerCAmelCase : Union[str, Any] = outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape , (1, model.config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) )
lowerCAmelCase : int = [
[-8.7839, -9.0056, -8.8121],
[-7.4104, -7.0313, -6.5401],
[-6.6105, -6.3427, -6.4675],
]
lowerCAmelCase : Dict = torch.tensor(snake_case__ ).to(snake_case__ )
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , snake_case__ , atol=snake_case__ ) )
# class_queries_logits
lowerCAmelCase : Optional[int] = outputs.class_queries_logits
self.assertEqual(class_queries_logits.shape , (1, model.config.num_queries, model.config.num_labels + 1) )
lowerCAmelCase : Union[str, Any] = torch.tensor(
[
[1.8324, -8.0835, -4.1922],
[0.8450, -9.0050, -3.6053],
[0.3045, -7.7293, -3.0275],
] ).to(snake_case__ )
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , snake_case__ , atol=snake_case__ ) )
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : List[Any] = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints ).to(snake_case__ ).eval()
lowerCAmelCase : int = self.default_image_processor
lowerCAmelCase : int = image_processor(
[np.zeros((3, 800, 1_333) ), np.zeros((3, 800, 1_333) )] , segmentation_maps=[np.zeros((384, 384) ).astype(np.floataa ), np.zeros((384, 384) ).astype(np.floataa )] , return_tensors="pt" , )
lowerCAmelCase : List[str] = inputs["pixel_values"].to(snake_case__ )
lowerCAmelCase : Dict = [el.to(snake_case__ ) for el in inputs["mask_labels"]]
lowerCAmelCase : str = [el.to(snake_case__ ) for el in inputs["class_labels"]]
with torch.no_grad():
lowerCAmelCase : Union[str, Any] = model(**snake_case__ )
self.assertTrue(outputs.loss is not None )
| 681 |
"""simple docstring"""
import argparse
from omegaconf import OmegaConf
import torch
from diffusers import DDIMScheduler, LDMPipeline, UNetLDMModel, VQModel
def a__ ( SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : Tuple , SCREAMING_SNAKE_CASE : Optional[int] ):
'''simple docstring'''
lowerCAmelCase : Tuple = OmegaConf.load(SCREAMING_SNAKE_CASE )
lowerCAmelCase : Union[str, Any] = torch.load(SCREAMING_SNAKE_CASE , map_location="cpu" )["model"]
lowerCAmelCase : int = list(state_dict.keys() )
# extract state_dict for VQVAE
lowerCAmelCase : Tuple = {}
lowerCAmelCase : Dict = "first_stage_model."
for key in keys:
if key.startswith(SCREAMING_SNAKE_CASE ):
lowerCAmelCase : List[str] = state_dict[key]
# extract state_dict for UNetLDM
lowerCAmelCase : List[Any] = {}
lowerCAmelCase : Tuple = "model.diffusion_model."
for key in keys:
if key.startswith(SCREAMING_SNAKE_CASE ):
lowerCAmelCase : str = state_dict[key]
lowerCAmelCase : List[str] = config.model.params.first_stage_config.params
lowerCAmelCase : List[Any] = config.model.params.unet_config.params
lowerCAmelCase : Union[str, Any] = VQModel(**SCREAMING_SNAKE_CASE ).eval()
vqvae.load_state_dict(SCREAMING_SNAKE_CASE )
lowerCAmelCase : Union[str, Any] = UNetLDMModel(**SCREAMING_SNAKE_CASE ).eval()
unet.load_state_dict(SCREAMING_SNAKE_CASE )
lowerCAmelCase : Optional[Any] = DDIMScheduler(
        num_train_timesteps=config.model.params.timesteps , beta_schedule="scaled_linear" , beta_start=config.model.params.linear_start , beta_end=config.model.params.linear_end , clip_sample=SCREAMING_SNAKE_CASE , )
lowerCAmelCase : Tuple = LDMPipeline(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
pipeline.save_pretrained(SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
lowerCAmelCase__ = argparse.ArgumentParser()
parser.add_argument('''--checkpoint_path''', type=str, required=True)
parser.add_argument('''--config_path''', type=str, required=True)
parser.add_argument('''--output_path''', type=str, required=True)
lowerCAmelCase__ = parser.parse_args()
convert_ldm_original(args.checkpoint_path, args.config_path, args.output_path)
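# Example invocation (illustrative script and file names):
#   python convert_ldm_original.py --checkpoint_path model.ckpt \
#       --config_path config.yaml --output_path ./ldm-pipeline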
| 681 | 1 |
"""simple docstring"""
from pickle import UnpicklingError
import jax
import jax.numpy as jnp
import numpy as np
from flax.serialization import from_bytes
from flax.traverse_util import flatten_dict
from ..utils import logging
lowerCAmelCase__ = logging.get_logger(__name__)
def a__ ( SCREAMING_SNAKE_CASE : Any , SCREAMING_SNAKE_CASE : Optional[Any] ):
'''simple docstring'''
try:
with open(SCREAMING_SNAKE_CASE , "rb" ) as flax_state_f:
lowerCAmelCase : Dict = from_bytes(SCREAMING_SNAKE_CASE , flax_state_f.read() )
except UnpicklingError as e:
try:
with open(SCREAMING_SNAKE_CASE ) as f:
if f.read().startswith("version" ):
raise OSError(
"You seem to have cloned a repository without having git-lfs installed. Please"
" install git-lfs and run `git lfs install` followed by `git lfs pull` in the"
" folder you cloned." )
else:
raise ValueError from e
except (UnicodeDecodeError, ValueError):
raise EnvironmentError(f"""Unable to convert {model_file} to Flax deserializable object. """ )
return load_flax_weights_in_pytorch_model(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
def a__ ( SCREAMING_SNAKE_CASE : Dict , SCREAMING_SNAKE_CASE : Any ):
'''simple docstring'''
try:
import torch # noqa: F401
except ImportError:
logger.error(
"Loading Flax weights in PyTorch requires both PyTorch and Flax to be installed. Please see"
" https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation"
" instructions." )
raise
# check if we have bf16 weights
lowerCAmelCase : Union[str, Any] = flatten_dict(jax.tree_util.tree_map(lambda SCREAMING_SNAKE_CASE : x.dtype == jnp.bfloataa , SCREAMING_SNAKE_CASE ) ).values()
if any(SCREAMING_SNAKE_CASE ):
# convert all weights to fp32 if they are bf16 since torch.from_numpy can-not handle bf16
# and bf16 is not fully supported in PT yet.
logger.warning(
"Found ``bfloat16`` weights in Flax model. Casting all ``bfloat16`` weights to ``float32`` "
"before loading those in PyTorch model." )
lowerCAmelCase : Tuple = jax.tree_util.tree_map(
lambda SCREAMING_SNAKE_CASE : params.astype(np.floataa ) if params.dtype == jnp.bfloataa else params , SCREAMING_SNAKE_CASE )
lowerCAmelCase : Optional[int] = ""
lowerCAmelCase : str = flatten_dict(SCREAMING_SNAKE_CASE , sep="." )
lowerCAmelCase : Optional[int] = pt_model.state_dict()
# keep track of unexpected & missing keys
lowerCAmelCase : List[str] = []
lowerCAmelCase : Any = set(pt_model_dict.keys() )
for flax_key_tuple, flax_tensor in flax_state_dict.items():
lowerCAmelCase : Union[str, Any] = flax_key_tuple.split("." )
if flax_key_tuple_array[-1] == "kernel" and flax_tensor.ndim == 4:
lowerCAmelCase : Dict = flax_key_tuple_array[:-1] + ["weight"]
lowerCAmelCase : int = jnp.transpose(SCREAMING_SNAKE_CASE , (3, 2, 0, 1) )
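            # Illustrative shape change (assuming Flax's (H, W, in_channels, out_channels)
            # conv kernel layout): a (3, 3, 64, 128) Flax kernel becomes a
            # (128, 64, 3, 3) PyTorch conv weight after this transpose.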
elif flax_key_tuple_array[-1] == "kernel":
lowerCAmelCase : List[Any] = flax_key_tuple_array[:-1] + ["weight"]
lowerCAmelCase : Optional[Any] = flax_tensor.T
elif flax_key_tuple_array[-1] == "scale":
lowerCAmelCase : int = flax_key_tuple_array[:-1] + ["weight"]
if "time_embedding" not in flax_key_tuple_array:
for i, flax_key_tuple_string in enumerate(SCREAMING_SNAKE_CASE ):
lowerCAmelCase : List[str] = (
flax_key_tuple_string.replace("_0" , ".0" )
.replace("_1" , ".1" )
.replace("_2" , ".2" )
.replace("_3" , ".3" )
.replace("_4" , ".4" )
.replace("_5" , ".5" )
.replace("_6" , ".6" )
.replace("_7" , ".7" )
.replace("_8" , ".8" )
.replace("_9" , ".9" )
)
lowerCAmelCase : Union[str, Any] = ".".join(SCREAMING_SNAKE_CASE )
if flax_key in pt_model_dict:
if flax_tensor.shape != pt_model_dict[flax_key].shape:
raise ValueError(
f"""Flax checkpoint seems to be incorrect. Weight {flax_key_tuple} was expected """
f"""to be of shape {pt_model_dict[flax_key].shape}, but is {flax_tensor.shape}.""" )
else:
# add weight to pytorch dict
lowerCAmelCase : List[Any] = np.asarray(SCREAMING_SNAKE_CASE ) if not isinstance(SCREAMING_SNAKE_CASE , np.ndarray ) else flax_tensor
lowerCAmelCase : Union[str, Any] = torch.from_numpy(SCREAMING_SNAKE_CASE )
# remove from missing keys
missing_keys.remove(SCREAMING_SNAKE_CASE )
else:
# weight is not expected by PyTorch model
unexpected_keys.append(SCREAMING_SNAKE_CASE )
pt_model.load_state_dict(SCREAMING_SNAKE_CASE )
# re-transform missing_keys to list
lowerCAmelCase : List[Any] = list(SCREAMING_SNAKE_CASE )
if len(SCREAMING_SNAKE_CASE ) > 0:
logger.warning(
"Some weights of the Flax model were not used when initializing the PyTorch model"
f""" {pt_model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are initializing"""
f""" {pt_model.__class__.__name__} from a Flax model trained on another task or with another architecture"""
" (e.g. initializing a BertForSequenceClassification model from a FlaxBertForPreTraining model).\n- This"
f""" IS NOT expected if you are initializing {pt_model.__class__.__name__} from a Flax model that you expect"""
" to be exactly identical (e.g. initializing a BertForSequenceClassification model from a"
" FlaxBertForSequenceClassification model)." )
if len(SCREAMING_SNAKE_CASE ) > 0:
logger.warning(
f"""Some weights of {pt_model.__class__.__name__} were not initialized from the Flax model and are newly"""
f""" initialized: {missing_keys}\nYou should probably TRAIN this model on a down-stream task to be able to"""
" use it for predictions and inference." )
return pt_model
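# Minimal usage sketch (illustrative; `load_flax_checkpoint_in_pytorch_model` is
# the upstream name of the first helper above):
#   pt_model = load_flax_checkpoint_in_pytorch_model(pt_model, "flax_model.msgpack")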
| 681 |
"""simple docstring"""
def a__ ( SCREAMING_SNAKE_CASE : int = 1_0_0_0 ):
'''simple docstring'''
return sum(e for e in range(3 , SCREAMING_SNAKE_CASE ) if e % 3 == 0 or e % 5 == 0 )
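# Worked check (well-known Project Euler #1 result): with the default bound of
# 1000, the multiples of 3 or 5 below 1000 sum to 233168.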
if __name__ == "__main__":
print(F"{solution() = }")
| 681 | 1 |
"""simple docstring"""
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, BertTokenizer, BlipImageProcessor, BlipProcessor, PreTrainedTokenizerFast
@require_vision
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
"""simple docstring"""
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : Any = tempfile.mkdtemp()
lowerCAmelCase : List[str] = BlipImageProcessor()
lowerCAmelCase : Optional[Any] = BertTokenizer.from_pretrained("hf-internal-testing/tiny-random-BertModel" )
lowerCAmelCase : str = BlipProcessor(snake_case__ , snake_case__ )
processor.save_pretrained(self.tmpdirname )
def lowercase__ ( self , **snake_case__ ):
"""simple docstring"""
return AutoProcessor.from_pretrained(self.tmpdirname , **snake_case__ ).tokenizer
def lowercase__ ( self , **snake_case__ ):
"""simple docstring"""
return AutoProcessor.from_pretrained(self.tmpdirname , **snake_case__ ).image_processor
def lowercase__ ( self ):
"""simple docstring"""
shutil.rmtree(self.tmpdirname )
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : Any = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
lowerCAmelCase : Dict = [Image.fromarray(np.moveaxis(snake_case__ , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : Tuple = BlipProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
lowerCAmelCase : Any = self.get_tokenizer(bos_token="(BOS)" , eos_token="(EOS)" )
lowerCAmelCase : Optional[int] = self.get_image_processor(do_normalize=snake_case__ , padding_value=1.0 )
lowerCAmelCase : List[str] = BlipProcessor.from_pretrained(
self.tmpdirname , bos_token="(BOS)" , eos_token="(EOS)" , do_normalize=snake_case__ , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , snake_case__ )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , snake_case__ )
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : Union[str, Any] = self.get_image_processor()
lowerCAmelCase : str = self.get_tokenizer()
lowerCAmelCase : int = BlipProcessor(tokenizer=snake_case__ , image_processor=snake_case__ )
lowerCAmelCase : Tuple = self.prepare_image_inputs()
lowerCAmelCase : str = image_processor(snake_case__ , return_tensors="np" )
lowerCAmelCase : Optional[Any] = processor(images=snake_case__ , return_tensors="np" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : Any = self.get_image_processor()
lowerCAmelCase : Union[str, Any] = self.get_tokenizer()
lowerCAmelCase : Optional[Any] = BlipProcessor(tokenizer=snake_case__ , image_processor=snake_case__ )
lowerCAmelCase : Union[str, Any] = "lower newer"
lowerCAmelCase : List[str] = processor(text=snake_case__ )
lowerCAmelCase : List[Any] = tokenizer(snake_case__ , return_token_type_ids=snake_case__ )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : Optional[int] = self.get_image_processor()
lowerCAmelCase : Optional[Any] = self.get_tokenizer()
lowerCAmelCase : Optional[Any] = BlipProcessor(tokenizer=snake_case__ , image_processor=snake_case__ )
lowerCAmelCase : int = "lower newer"
lowerCAmelCase : List[Any] = self.prepare_image_inputs()
lowerCAmelCase : str = processor(text=snake_case__ , images=snake_case__ )
self.assertListEqual(list(inputs.keys() ) , ["pixel_values", "input_ids", "attention_mask"] )
# test if it raises when no input is passed
with pytest.raises(snake_case__ ):
processor()
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : Any = self.get_image_processor()
lowerCAmelCase : Union[str, Any] = self.get_tokenizer()
lowerCAmelCase : str = BlipProcessor(tokenizer=snake_case__ , image_processor=snake_case__ )
lowerCAmelCase : List[str] = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
lowerCAmelCase : str = processor.batch_decode(snake_case__ )
lowerCAmelCase : Optional[int] = tokenizer.batch_decode(snake_case__ )
self.assertListEqual(snake_case__ , snake_case__ )
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : Any = self.get_image_processor()
lowerCAmelCase : List[str] = self.get_tokenizer()
lowerCAmelCase : Union[str, Any] = BlipProcessor(tokenizer=snake_case__ , image_processor=snake_case__ )
lowerCAmelCase : str = "lower newer"
lowerCAmelCase : Optional[Any] = self.prepare_image_inputs()
lowerCAmelCase : List[str] = processor(text=snake_case__ , images=snake_case__ )
# For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask']
self.assertListEqual(list(inputs.keys() ) , ["pixel_values", "input_ids", "attention_mask"] )
| 681 |
"""simple docstring"""
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized, parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv("TEST_SAGEMAKER" , "False" ) ) is not True , reason="Skipping test because should only be run when releasing minor transformers version" , )
@pytest.mark.usefixtures("sm_env" )
@parameterized_class(
[
{
"framework": "pytorch",
"script": "run_glue_model_parallelism.py",
"model_name_or_path": "roberta-large",
"instance_type": "ml.p3dn.24xlarge",
"results": {"train_runtime": 16_00, "eval_accuracy": 0.3, "eval_loss": 1.2},
},
{
"framework": "pytorch",
"script": "run_glue.py",
"model_name_or_path": "roberta-large",
"instance_type": "ml.p3dn.24xlarge",
"results": {"train_runtime": 16_00, "eval_accuracy": 0.3, "eval_loss": 1.2},
},
] )
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
"""simple docstring"""
def lowercase__ ( self ):
"""simple docstring"""
if self.framework == "pytorch":
subprocess.run(
f"""cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py""".split() , encoding="utf-8" , check=snake_case__ , )
assert hasattr(self , "env" )
def lowercase__ ( self , snake_case__ ):
"""simple docstring"""
lowerCAmelCase : Optional[Any] = {
"enabled": True,
"processes_per_host": 8,
}
lowerCAmelCase : List[Any] = {
"enabled": True,
"parameters": {
"microbatches": 4,
"placement_strategy": "spread",
"pipeline": "interleaved",
"optimize": "speed",
"partitions": 4,
"ddp": True,
},
}
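        # Combine SageMaker's model-parallelism options (smdistributed, 4 partitions)
        # with the MPI settings that control how many processes are launched per host.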
lowerCAmelCase : List[Any] = {"smdistributed": {"modelparallel": smp_options}, "mpi": mpi_options}
lowerCAmelCase : Optional[Any] = "trainer" if self.script == "run_glue.py" else "smtrainer"
# creates estimator
return HuggingFace(
entry_point=self.script , source_dir=self.env.test_path , role=self.env.role , image_uri=self.env.image_uri , base_job_name=f"""{self.env.base_job_name}-{instance_count}-smp-{name_extension}""" , instance_count=snake_case__ , instance_type=self.instance_type , debugger_hook_config=snake_case__ , hyperparameters={
**self.env.hyperparameters,
"model_name_or_path": self.model_name_or_path,
"max_steps": 500,
} , metric_definitions=self.env.metric_definitions , distribution=snake_case__ , py_version="py36" , )
def lowercase__ ( self , snake_case__ ):
"""simple docstring"""
TrainingJobAnalytics(snake_case__ ).export_csv(f"""{self.env.test_path}/{job_name}_metrics.csv""" )
@parameterized.expand([(1,)] )
def lowercase__ ( self , snake_case__ ):
"""simple docstring"""
lowerCAmelCase : Dict = self.create_estimator(snake_case__ )
# run training
estimator.fit()
# result dataframe
lowerCAmelCase : Tuple = TrainingJobAnalytics(estimator.latest_training_job.name ).dataframe()
# extract kpis
lowerCAmelCase : Tuple = list(result_metrics_df[result_metrics_df.metric_name == "eval_accuracy"]["value"] )
lowerCAmelCase : Optional[int] = list(result_metrics_df[result_metrics_df.metric_name == "eval_loss"]["value"] )
# get train time from SageMaker job, this includes starting, preprocessing, stopping
lowerCAmelCase : Dict = (
Session().describe_training_job(estimator.latest_training_job.name ).get("TrainingTimeInSeconds" , 999_999 )
)
# assert kpis
assert train_runtime <= self.results["train_runtime"]
assert all(t >= self.results["eval_accuracy"] for t in eval_accuracy )
assert all(t <= self.results["eval_loss"] for t in eval_loss )
# dump tests result into json file to share in PR
with open(f"""{estimator.latest_training_job.name}.json""" , "w" ) as outfile:
json.dump({"train_time": train_runtime, "eval_accuracy": eval_accuracy, "eval_loss": eval_loss} , snake_case__ )
| 681 | 1 |
"""simple docstring"""
import os
import re
import warnings
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_ta import TaTokenizer
else:
lowerCAmelCase__ = None
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {'''vocab_file''': '''spiece.model''', '''tokenizer_file''': '''tokenizer.json'''}
lowerCAmelCase__ = {
'''vocab_file''': {
'''t5-small''': '''https://huggingface.co/t5-small/resolve/main/spiece.model''',
'''t5-base''': '''https://huggingface.co/t5-base/resolve/main/spiece.model''',
'''t5-large''': '''https://huggingface.co/t5-large/resolve/main/spiece.model''',
'''t5-3b''': '''https://huggingface.co/t5-3b/resolve/main/spiece.model''',
'''t5-11b''': '''https://huggingface.co/t5-11b/resolve/main/spiece.model''',
},
'''tokenizer_file''': {
'''t5-small''': '''https://huggingface.co/t5-small/resolve/main/tokenizer.json''',
'''t5-base''': '''https://huggingface.co/t5-base/resolve/main/tokenizer.json''',
'''t5-large''': '''https://huggingface.co/t5-large/resolve/main/tokenizer.json''',
'''t5-3b''': '''https://huggingface.co/t5-3b/resolve/main/tokenizer.json''',
'''t5-11b''': '''https://huggingface.co/t5-11b/resolve/main/tokenizer.json''',
},
}
# TODO(PVP) - this should be removed in Transformers v5
lowerCAmelCase__ = {
'''t5-small''': 512,
'''t5-base''': 512,
'''t5-large''': 512,
'''t5-3b''': 512,
'''t5-11b''': 512,
}
class SCREAMING_SNAKE_CASE__ ( lowercase ):
"""simple docstring"""
a : List[str] =VOCAB_FILES_NAMES
a : Optional[int] =PRETRAINED_VOCAB_FILES_MAP
a : Dict =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a : str =["input_ids", "attention_mask"]
a : int =TaTokenizer
a : List[int] =[]
def __init__( self , snake_case__=None , snake_case__=None , snake_case__="</s>" , snake_case__="<unk>" , snake_case__="<pad>" , snake_case__=100 , snake_case__=None , **snake_case__ , ):
"""simple docstring"""
if extra_ids > 0 and additional_special_tokens is None:
lowerCAmelCase : Any = [f"""<extra_id_{i}>""" for i in range(snake_case__ )]
elif extra_ids > 0 and additional_special_tokens is not None:
# Check that we have the right number of extra special tokens
lowerCAmelCase : List[str] = len(set(filter(lambda snake_case__ : bool("extra_id_" in str(snake_case__ ) ) , snake_case__ ) ) )
if extra_tokens != extra_ids:
raise ValueError(
f"""Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are"""
" provided to T5Tokenizer. In this case the additional_special_tokens must include the extra_ids"
" tokens" )
super().__init__(
snake_case__ , tokenizer_file=snake_case__ , eos_token=snake_case__ , unk_token=snake_case__ , pad_token=snake_case__ , extra_ids=snake_case__ , additional_special_tokens=snake_case__ , **snake_case__ , )
lowerCAmelCase : int = vocab_file
lowerCAmelCase : int = False if not self.vocab_file else True
lowerCAmelCase : Union[str, Any] = extra_ids
@staticmethod
def lowercase__ ( snake_case__ , snake_case__ , snake_case__ ):
"""simple docstring"""
if pretrained_model_name_or_path in TaTokenizerFast.max_model_input_sizes:
lowerCAmelCase : List[Any] = TaTokenizerFast.max_model_input_sizes[pretrained_model_name_or_path]
if init_max_model_length is not None and init_max_model_length != max_model_length:
return init_max_model_length
elif init_max_model_length is None:
warnings.warn(
"This tokenizer was incorrectly instantiated with a model max length of"
f""" {deprecated_max_model_length} which will be corrected in Transformers v5.\nFor now, this"""
" behavior is kept to avoid breaking backwards compatibility when padding/encoding with"
" `truncation is True`.\n- Be aware that you SHOULD NOT rely on"
f""" {pretrained_model_name_or_path} automatically truncating your input to"""
f""" {deprecated_max_model_length} when padding/encoding.\n- If you want to encode/pad to sequences"""
f""" longer than {deprecated_max_model_length} you can either instantiate this tokenizer with"""
" `model_max_length` or pass `max_length` when encoding/padding.\n- To avoid this warning, please"
" instantiate this tokenizer with `model_max_length` set to your preferred value." , snake_case__ , )
return max_model_length
def lowercase__ ( self , snake_case__ , snake_case__ = None ):
"""simple docstring"""
if not self.can_save_slow_tokenizer:
raise ValueError(
"Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
"tokenizer." )
if not os.path.isdir(snake_case__ ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
lowerCAmelCase : Optional[int] = os.path.join(
snake_case__ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(snake_case__ ):
copyfile(self.vocab_file , snake_case__ )
logger.info(f"""Copy vocab file to {out_vocab_file}""" )
return (out_vocab_file,)
def lowercase__ ( self , snake_case__ , snake_case__ = None ):
"""simple docstring"""
lowerCAmelCase : Tuple = token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return self.prefix_tokens + token_ids_a
else:
lowerCAmelCase : Dict = token_ids_a + [self.eos_token_id]
return self.prefix_tokens + token_ids_a + token_ids_a
def lowercase__ ( self , snake_case__ , snake_case__ = None ):
"""simple docstring"""
lowerCAmelCase : Tuple = [self.eos_token_id]
if token_ids_a is None:
return len(token_ids_a + eos ) * [0]
return len(token_ids_a + eos + token_ids_a + eos ) * [0]
def lowercase__ ( self ):
"""simple docstring"""
return list(
set(filter(lambda snake_case__ : bool(re.search(r"<extra_id_\d+>" , snake_case__ ) ) is not None , self.additional_special_tokens ) ) )
def lowercase__ ( self ):
"""simple docstring"""
return [self.convert_tokens_to_ids(snake_case__ ) for token in self.get_sentinel_tokens()]
| 681 |
"""simple docstring"""
from math import factorial
def a__ ( SCREAMING_SNAKE_CASE : int = 1_0_0 ):
'''simple docstring'''
return sum(int(SCREAMING_SNAKE_CASE ) for x in str(factorial(SCREAMING_SNAKE_CASE ) ) )
if __name__ == "__main__":
print(solution(int(input('''Enter the Number: ''').strip())))
| 681 | 1 |
"""simple docstring"""
import os
import tempfile
import unittest
import uuid
from pathlib import Path
from transformers.testing_utils import get_tests_dir, require_soundfile, require_torch, require_vision
from transformers.tools.agent_types import AgentAudio, AgentImage, AgentText
from transformers.utils import is_soundfile_availble, is_torch_available, is_vision_available
if is_torch_available():
import torch
if is_soundfile_availble():
import soundfile as sf
if is_vision_available():
from PIL import Image
def a__ ( SCREAMING_SNAKE_CASE : Optional[Any]="" ):
'''simple docstring'''
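    # Build a fresh temp directory and return a unique file path inside it;
    # the suffix (e.g. ".wav") selects the serialization format used by the caller.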
lowerCAmelCase : Dict = tempfile.mkdtemp()
return os.path.join(SCREAMING_SNAKE_CASE , str(uuid.uuida() ) + suffix )
@require_soundfile
@require_torch
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
"""simple docstring"""
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : Any = torch.rand(12 , dtype=torch.floataa ) - 0.5
lowerCAmelCase : Optional[Any] = AgentAudio(snake_case__ )
lowerCAmelCase : Optional[Any] = str(agent_type.to_string() )
# Ensure that the tensor and the agent_type's tensor are the same
self.assertTrue(torch.allclose(snake_case__ , agent_type.to_raw() , atol=1e-4 ) )
del agent_type
# Ensure the path remains even after the object deletion
self.assertTrue(os.path.exists(snake_case__ ) )
# Ensure that the file contains the same value as the original tensor
lowerCAmelCase , lowerCAmelCase : Union[str, Any] = sf.read(snake_case__ )
self.assertTrue(torch.allclose(snake_case__ , torch.tensor(snake_case__ ) , atol=1e-4 ) )
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : Any = torch.rand(12 , dtype=torch.floataa ) - 0.5
lowerCAmelCase : Optional[Any] = get_new_path(suffix=".wav" )
sf.write(snake_case__ , snake_case__ , 16_000 )
lowerCAmelCase : List[str] = AgentAudio(snake_case__ )
self.assertTrue(torch.allclose(snake_case__ , agent_type.to_raw() , atol=1e-4 ) )
self.assertEqual(agent_type.to_string() , snake_case__ )
@require_vision
@require_torch
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
"""simple docstring"""
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : Tuple = torch.randint(0 , 256 , (64, 64, 3) )
lowerCAmelCase : Tuple = AgentImage(snake_case__ )
lowerCAmelCase : List[Any] = str(agent_type.to_string() )
# Ensure that the tensor and the agent_type's tensor are the same
self.assertTrue(torch.allclose(snake_case__ , agent_type._tensor , atol=1e-4 ) )
self.assertIsInstance(agent_type.to_raw() , Image.Image )
# Ensure the path remains even after the object deletion
del agent_type
self.assertTrue(os.path.exists(snake_case__ ) )
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : List[str] = Path(get_tests_dir("fixtures/tests_samples/COCO" ) ) / "000000039769.png"
lowerCAmelCase : str = Image.open(snake_case__ )
lowerCAmelCase : List[str] = AgentImage(snake_case__ )
self.assertTrue(path.samefile(agent_type.to_string() ) )
self.assertTrue(image == agent_type.to_raw() )
# Ensure the path remains even after the object deletion
del agent_type
self.assertTrue(os.path.exists(snake_case__ ) )
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : Tuple = Path(get_tests_dir("fixtures/tests_samples/COCO" ) ) / "000000039769.png"
lowerCAmelCase : List[str] = Image.open(snake_case__ )
lowerCAmelCase : Tuple = AgentImage(snake_case__ )
self.assertFalse(path.samefile(agent_type.to_string() ) )
self.assertTrue(image == agent_type.to_raw() )
# Ensure the path remains even after the object deletion
del agent_type
self.assertTrue(os.path.exists(snake_case__ ) )
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
"""simple docstring"""
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : Any = "Hey!"
lowerCAmelCase : Union[str, Any] = AgentText(snake_case__ )
self.assertEqual(snake_case__ , agent_type.to_string() )
self.assertEqual(snake_case__ , agent_type.to_raw() )
self.assertEqual(snake_case__ , snake_case__ )
| 681 |
"""simple docstring"""
from typing import Any
class SCREAMING_SNAKE_CASE__ :
"""simple docstring"""
def __init__( self , snake_case__ ):
"""simple docstring"""
lowerCAmelCase : Dict = data
lowerCAmelCase : Any = None
def __repr__( self ):
"""simple docstring"""
return f"""Node({self.data})"""
class SCREAMING_SNAKE_CASE__ :
"""simple docstring"""
def __init__( self ):
"""simple docstring"""
lowerCAmelCase : Tuple = None
def __iter__( self ):
"""simple docstring"""
lowerCAmelCase : Any = self.head
while node:
yield node.data
lowerCAmelCase : Optional[int] = node.next
def __len__( self ):
"""simple docstring"""
return sum(1 for _ in self )
def __repr__( self ):
"""simple docstring"""
return "->".join([str(snake_case__ ) for item in self] )
def __getitem__( self , snake_case__ ):
"""simple docstring"""
if not 0 <= index < len(self ):
raise ValueError("list index out of range." )
for i, node in enumerate(self ):
if i == index:
return node
return None
def __setitem__( self , snake_case__ , snake_case__ ):
"""simple docstring"""
if not 0 <= index < len(self ):
raise ValueError("list index out of range." )
lowerCAmelCase : Union[str, Any] = self.head
for _ in range(snake_case__ ):
lowerCAmelCase : int = current.next
lowerCAmelCase : List[str] = data
def lowercase__ ( self , snake_case__ ):
"""simple docstring"""
self.insert_nth(len(self ) , snake_case__ )
def lowercase__ ( self , snake_case__ ):
"""simple docstring"""
self.insert_nth(0 , snake_case__ )
def lowercase__ ( self , snake_case__ , snake_case__ ):
"""simple docstring"""
if not 0 <= index <= len(self ):
raise IndexError("list index out of range" )
lowerCAmelCase : Optional[int] = Node(snake_case__ )
if self.head is None:
lowerCAmelCase : Any = new_node
elif index == 0:
lowerCAmelCase : Any = self.head # link new_node to head
lowerCAmelCase : Union[str, Any] = new_node
else:
lowerCAmelCase : List[str] = self.head
for _ in range(index - 1 ):
lowerCAmelCase : int = temp.next
lowerCAmelCase : int = temp.next
lowerCAmelCase : Dict = new_node
def lowercase__ ( self ): # print every node data
"""simple docstring"""
print(self )
def lowercase__ ( self ):
"""simple docstring"""
return self.delete_nth(0 )
def lowercase__ ( self ): # delete from tail
"""simple docstring"""
return self.delete_nth(len(self ) - 1 )
def lowercase__ ( self , snake_case__ = 0 ):
"""simple docstring"""
if not 0 <= index <= len(self ) - 1: # test if index is valid
raise IndexError("List index out of range." )
lowerCAmelCase : List[Any] = self.head # default first node
if index == 0:
lowerCAmelCase : Optional[int] = self.head.next
else:
lowerCAmelCase : List[str] = self.head
for _ in range(index - 1 ):
lowerCAmelCase : Union[str, Any] = temp.next
lowerCAmelCase : Optional[Any] = temp.next
lowerCAmelCase : Any = temp.next.next
return delete_node.data
def lowercase__ ( self ):
"""simple docstring"""
return self.head is None
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : str = None
lowerCAmelCase : Optional[int] = self.head
while current:
# Store the current node's next node.
lowerCAmelCase : List[Any] = current.next
# Make the current node's next point backwards
lowerCAmelCase : Dict = prev
# Make the previous node be the current node
lowerCAmelCase : List[str] = current
# Make the current node the next node (to progress iteration)
lowerCAmelCase : int = next_node
# Return prev in order to put the head at the end
lowerCAmelCase : Tuple = prev
def a__ ( ):
'''simple docstring'''
lowerCAmelCase : Tuple = LinkedList()
assert linked_list.is_empty() is True
assert str(SCREAMING_SNAKE_CASE ) == ""
try:
linked_list.delete_head()
raise AssertionError # This should not happen.
except IndexError:
assert True # This should happen.
try:
linked_list.delete_tail()
raise AssertionError # This should not happen.
except IndexError:
assert True # This should happen.
for i in range(1_0 ):
assert len(SCREAMING_SNAKE_CASE ) == i
linked_list.insert_nth(SCREAMING_SNAKE_CASE , i + 1 )
assert str(SCREAMING_SNAKE_CASE ) == "->".join(str(SCREAMING_SNAKE_CASE ) for i in range(1 , 1_1 ) )
linked_list.insert_head(0 )
linked_list.insert_tail(1_1 )
assert str(SCREAMING_SNAKE_CASE ) == "->".join(str(SCREAMING_SNAKE_CASE ) for i in range(0 , 1_2 ) )
assert linked_list.delete_head() == 0
assert linked_list.delete_nth(9 ) == 1_0
assert linked_list.delete_tail() == 1_1
assert len(SCREAMING_SNAKE_CASE ) == 9
assert str(SCREAMING_SNAKE_CASE ) == "->".join(str(SCREAMING_SNAKE_CASE ) for i in range(1 , 1_0 ) )
assert all(linked_list[i] == i + 1 for i in range(0 , 9 ) ) is True
for i in range(0 , 9 ):
lowerCAmelCase : Optional[Any] = -i
assert all(linked_list[i] == -i for i in range(0 , 9 ) ) is True
linked_list.reverse()
assert str(SCREAMING_SNAKE_CASE ) == "->".join(str(SCREAMING_SNAKE_CASE ) for i in range(-8 , 1 ) )
def a__ ( ):
'''simple docstring'''
lowerCAmelCase : List[str] = [
-9,
1_0_0,
Node(7_7_3_4_5_1_1_2 ),
"dlrow olleH",
7,
5_5_5_5,
0,
-192.55_555,
"Hello, world!",
77.9,
Node(1_0 ),
None,
None,
12.20,
]
lowerCAmelCase : List[str] = LinkedList()
for i in test_input:
linked_list.insert_tail(SCREAMING_SNAKE_CASE )
# Check if it's empty or not
assert linked_list.is_empty() is False
assert (
str(SCREAMING_SNAKE_CASE ) == "-9->100->Node(77345112)->dlrow olleH->7->5555->0->"
"-192.55555->Hello, world!->77.9->Node(10)->None->None->12.2"
)
# Delete the head
lowerCAmelCase : str = linked_list.delete_head()
assert result == -9
assert (
str(SCREAMING_SNAKE_CASE ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
"Hello, world!->77.9->Node(10)->None->None->12.2"
)
# Delete the tail
lowerCAmelCase : Union[str, Any] = linked_list.delete_tail()
assert result == 12.2
assert (
str(SCREAMING_SNAKE_CASE ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
"Hello, world!->77.9->Node(10)->None->None"
)
# Delete a node in specific location in linked list
lowerCAmelCase : List[str] = linked_list.delete_nth(1_0 )
assert result is None
assert (
str(SCREAMING_SNAKE_CASE ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
"Hello, world!->77.9->Node(10)->None"
)
# Add a Node instance to its head
linked_list.insert_head(Node("Hello again, world!" ) )
assert (
str(SCREAMING_SNAKE_CASE )
== "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
"7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None"
)
# Add None to its tail
linked_list.insert_tail(SCREAMING_SNAKE_CASE )
assert (
str(SCREAMING_SNAKE_CASE )
== "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
"7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None->None"
)
# Reverse the linked list
linked_list.reverse()
assert (
str(SCREAMING_SNAKE_CASE )
== "None->None->Node(10)->77.9->Hello, world!->-192.55555->0->5555->"
"7->dlrow olleH->Node(77345112)->100->Node(Hello again, world!)"
)
def a__ ( ):
'''simple docstring'''
from doctest import testmod
testmod()
lowerCAmelCase : Optional[Any] = LinkedList()
linked_list.insert_head(input("Inserting 1st at head " ).strip() )
linked_list.insert_head(input("Inserting 2nd at head " ).strip() )
print("\nPrint list:" )
linked_list.print_list()
linked_list.insert_tail(input("\nInserting 1st at tail " ).strip() )
linked_list.insert_tail(input("Inserting 2nd at tail " ).strip() )
print("\nPrint list:" )
linked_list.print_list()
print("\nDelete head" )
linked_list.delete_head()
print("Delete tail" )
linked_list.delete_tail()
print("\nPrint list:" )
linked_list.print_list()
print("\nReverse linked list" )
linked_list.reverse()
print("\nPrint list:" )
linked_list.print_list()
print("\nString representation of linked list:" )
print(SCREAMING_SNAKE_CASE )
print("\nReading/changing Node data using indexing:" )
print(f"""Element at Position 1: {linked_list[1]}""" )
lowerCAmelCase : Any = input("Enter New Value: " ).strip()
print("New list:" )
print(SCREAMING_SNAKE_CASE )
print(f"""length of linked_list is : {len(SCREAMING_SNAKE_CASE )}""" )
if __name__ == "__main__":
main()
| 681 | 1 |
"""simple docstring"""
from ....configuration_utils import PretrainedConfig
from ....utils import logging
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {
'''Visual-Attention-Network/van-base''': (
'''https://huggingface.co/Visual-Attention-Network/van-base/blob/main/config.json'''
),
}
class SCREAMING_SNAKE_CASE__ ( lowercase ):
"""simple docstring"""
a : Optional[Any] ="van"
def __init__( self , snake_case__=224 , snake_case__=3 , snake_case__=[7, 3, 3, 3] , snake_case__=[4, 2, 2, 2] , snake_case__=[64, 128, 320, 512] , snake_case__=[3, 3, 12, 3] , snake_case__=[8, 8, 4, 4] , snake_case__="gelu" , snake_case__=0.02 , snake_case__=1e-6 , snake_case__=1e-2 , snake_case__=0.0 , snake_case__=0.0 , **snake_case__ , ):
"""simple docstring"""
super().__init__(**snake_case__ )
lowerCAmelCase : Union[str, Any] = image_size
lowerCAmelCase : int = num_channels
lowerCAmelCase : Optional[Any] = patch_sizes
lowerCAmelCase : Optional[int] = strides
lowerCAmelCase : int = hidden_sizes
lowerCAmelCase : int = depths
lowerCAmelCase : Optional[int] = mlp_ratios
lowerCAmelCase : int = hidden_act
lowerCAmelCase : Tuple = initializer_range
lowerCAmelCase : Optional[int] = layer_norm_eps
lowerCAmelCase : List[str] = layer_scale_init_value
lowerCAmelCase : List[Any] = drop_path_rate
lowerCAmelCase : str = dropout_rate
| 681 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
lowerCAmelCase__ = {
'''configuration_efficientformer''': [
'''EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''EfficientFormerConfig''',
]
}
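# Backends are probed lazily: each optional dependency gates its own set of
# exported symbols, and a missing backend simply leaves those names unregistered.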
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = ['''EfficientFormerImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = [
'''EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''EfficientFormerForImageClassification''',
'''EfficientFormerForImageClassificationWithTeacher''',
'''EfficientFormerModel''',
'''EfficientFormerPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = [
'''TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFEfficientFormerForImageClassification''',
'''TFEfficientFormerForImageClassificationWithTeacher''',
'''TFEfficientFormerModel''',
'''TFEfficientFormerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_efficientformer import EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, EfficientFormerConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_efficientformer import EfficientFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_efficientformer import (
EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
EfficientFormerForImageClassification,
EfficientFormerForImageClassificationWithTeacher,
EfficientFormerModel,
EfficientFormerPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_efficientformer import (
TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerModel,
TFEfficientFormerPreTrainedModel,
)
else:
import sys
lowerCAmelCase__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 681 | 1 |
"""simple docstring"""
import logging
import os
from typing import Dict, List, Optional, Union
import torch
import torch.nn as nn
from accelerate.utils.imports import (
is_abit_bnb_available,
is_abit_bnb_available,
is_bnb_available,
)
from ..big_modeling import dispatch_model, init_empty_weights
from .dataclasses import BnbQuantizationConfig
from .modeling import (
find_tied_parameters,
get_balanced_memory,
infer_auto_device_map,
load_checkpoint_in_model,
offload_weight,
set_module_tensor_to_device,
)
if is_bnb_available():
import bitsandbytes as bnb
from copy import deepcopy
lowerCAmelCase__ = logging.getLogger(__name__)
def a__ ( SCREAMING_SNAKE_CASE : torch.nn.Module , SCREAMING_SNAKE_CASE : BnbQuantizationConfig , SCREAMING_SNAKE_CASE : Union[str, os.PathLike] = None , SCREAMING_SNAKE_CASE : Optional[Dict[str, Union[int, str, torch.device]]] = None , SCREAMING_SNAKE_CASE : Optional[List[str]] = None , SCREAMING_SNAKE_CASE : Optional[Dict[Union[int, str], Union[int, str]]] = None , SCREAMING_SNAKE_CASE : Optional[Union[str, os.PathLike]] = None , SCREAMING_SNAKE_CASE : bool = False , ):
'''simple docstring'''
lowerCAmelCase : int = bnb_quantization_config.load_in_abit
lowerCAmelCase : List[str] = bnb_quantization_config.load_in_abit
if load_in_abit and not is_abit_bnb_available():
raise ImportError(
"You have a version of `bitsandbytes` that is not compatible with 8bit quantization,"
" make sure you have the latest version of `bitsandbytes` installed." )
if load_in_abit and not is_abit_bnb_available():
raise ValueError(
"You have a version of `bitsandbytes` that is not compatible with 4bit quantization,"
"make sure you have the latest version of `bitsandbytes` installed." )
lowerCAmelCase : Tuple = []
# custom device map
if isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) and len(device_map.keys() ) > 1:
lowerCAmelCase : Union[str, Any] = [key for key, value in device_map.items() if value in ["disk", "cpu"]]
# We keep some modules such as the lm_head in their original dtype for numerical stability reasons
if bnb_quantization_config.skip_modules is None:
lowerCAmelCase : Any = get_keys_to_not_convert(SCREAMING_SNAKE_CASE )
# add cpu modules to skip modules only for 4-bit modules
if load_in_abit:
bnb_quantization_config.skip_modules.extend(SCREAMING_SNAKE_CASE )
lowerCAmelCase : Any = bnb_quantization_config.skip_modules
# We add the modules we want to keep in full precision
if bnb_quantization_config.keep_in_fpaa_modules is None:
lowerCAmelCase : Optional[int] = []
lowerCAmelCase : Tuple = bnb_quantization_config.keep_in_fpaa_modules
modules_to_not_convert.extend(SCREAMING_SNAKE_CASE )
# compatibility with peft
lowerCAmelCase : Optional[Any] = load_in_abit
lowerCAmelCase : List[Any] = load_in_abit
lowerCAmelCase : List[str] = get_parameter_device(SCREAMING_SNAKE_CASE )
if model_device.type != "meta":
# quantization of an already loaded model
logger.warning(
"It is not recommended to quantize a loaded model. "
"The model should be instantiated under the `init_empty_weights` context manager." )
lowerCAmelCase : Optional[int] = replace_with_bnb_layers(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , modules_to_not_convert=SCREAMING_SNAKE_CASE )
# convert param to the right dtype
lowerCAmelCase : Dict = bnb_quantization_config.torch_dtype
for name, param in model.state_dict().items():
if any(module_to_keep_in_fpaa in name for module_to_keep_in_fpaa in keep_in_fpaa_modules ):
param.to(torch.floataa )
if param.dtype != torch.floataa:
lowerCAmelCase : List[str] = name.replace(".weight" , "" ).replace(".bias" , "" )
lowerCAmelCase : int = getattr(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
if param is not None:
param.to(torch.floataa )
elif torch.is_floating_point(SCREAMING_SNAKE_CASE ):
param.to(SCREAMING_SNAKE_CASE )
if model_device.type == "cuda":
# move everything to cpu in the first place because we can't do quantization if the weights are already on cuda
model.cuda(torch.cuda.current_device() )
torch.cuda.empty_cache()
elif torch.cuda.is_available():
model.to(torch.cuda.current_device() )
else:
raise RuntimeError("No GPU found. A GPU is needed for quantization." )
logger.info(
f"""The model device type is {model_device.type}. However, cuda is needed for quantization."""
"We move the model to cuda." )
return model
elif weights_location is None:
raise RuntimeError(
f"""`weights_location` needs to be the folder path containing the weights of the model, but we found {weights_location} """ )
else:
with init_empty_weights():
lowerCAmelCase : Any = replace_with_bnb_layers(
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , modules_to_not_convert=SCREAMING_SNAKE_CASE )
lowerCAmelCase : str = get_quantized_model_device_map(
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , max_memory=SCREAMING_SNAKE_CASE , no_split_module_classes=SCREAMING_SNAKE_CASE , )
if offload_state_dict is None and device_map is not None and "disk" in device_map.values():
lowerCAmelCase : List[str] = True
lowerCAmelCase : Union[str, Any] = any(x in list(device_map.values() ) for x in ["cpu", "disk"] )
load_checkpoint_in_model(
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , dtype=bnb_quantization_config.torch_dtype , offload_folder=SCREAMING_SNAKE_CASE , offload_state_dict=SCREAMING_SNAKE_CASE , keep_in_fpaa_modules=bnb_quantization_config.keep_in_fpaa_modules , offload_abit_bnb=load_in_abit and offload , )
return dispatch_model(SCREAMING_SNAKE_CASE , device_map=SCREAMING_SNAKE_CASE , offload_dir=SCREAMING_SNAKE_CASE )
def a__ ( SCREAMING_SNAKE_CASE : Optional[int] , SCREAMING_SNAKE_CASE : Tuple , SCREAMING_SNAKE_CASE : List[Any]=None , SCREAMING_SNAKE_CASE : Optional[int]=None , SCREAMING_SNAKE_CASE : Union[str, Any]=None ):
'''simple docstring'''
if device_map is None:
if torch.cuda.is_available():
lowerCAmelCase : Optional[Any] = {"": torch.cuda.current_device()}
else:
raise RuntimeError("No GPU found. A GPU is needed for quantization." )
logger.info("The device_map was not initialized." "Setting device_map to `{'':torch.cuda.current_device()}`." )
if isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
if device_map not in ["auto", "balanced", "balanced_low_0", "sequential"]:
raise ValueError(
"If passing a string for `device_map`, please choose 'auto', 'balanced', 'balanced_low_0' or "
"'sequential'." )
lowerCAmelCase : Dict = {}
special_dtypes.update(
{
name: bnb_quantization_config.torch_dtype
for name, _ in model.named_parameters()
if any(m in name for m in bnb_quantization_config.skip_modules )
} )
special_dtypes.update(
{
name: torch.floataa
for name, _ in model.named_parameters()
if any(m in name for m in bnb_quantization_config.keep_in_fpaa_modules )
} )
lowerCAmelCase : List[Any] = {}
lowerCAmelCase : List[str] = special_dtypes
lowerCAmelCase : Optional[int] = no_split_module_classes
lowerCAmelCase : List[Any] = bnb_quantization_config.target_dtype
# get max_memory for each device.
if device_map != "sequential":
lowerCAmelCase : str = get_balanced_memory(
SCREAMING_SNAKE_CASE , low_zero=(device_map == "balanced_low_0") , max_memory=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE , )
lowerCAmelCase : Optional[int] = max_memory
lowerCAmelCase : Optional[int] = infer_auto_device_map(SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE )
if isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
# check if don't have any quantized module on the cpu
lowerCAmelCase : List[str] = bnb_quantization_config.skip_modules + bnb_quantization_config.keep_in_fpaa_modules
lowerCAmelCase : str = {
key: device_map[key] for key in device_map.keys() if key not in modules_not_to_convert
}
for device in ["cpu", "disk"]:
if device in device_map_without_some_modules.values():
if bnb_quantization_config.load_in_abit:
raise ValueError(
"\n Some modules are dispatched on the CPU or the disk. Make sure you have enough GPU RAM to fit\n the quantized model. If you want to dispatch the model on the CPU or the disk while keeping\n these modules in `torch_dtype`, you need to pass a custom `device_map` to\n `load_and_quantize_model`. Check\n https://huggingface.co/docs/accelerate/main/en/usage_guides/quantization#offload-modules-to-cpu-and-disk\n for more details.\n " )
else:
logger.info(
"Some modules are are offloaded to the CPU or the disk. Note that these modules will be converted to 8-bit" )
del device_map_without_some_modules
return device_map
def a__ ( SCREAMING_SNAKE_CASE : Optional[int] , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : List[Any]=None , SCREAMING_SNAKE_CASE : str=None ):
'''simple docstring'''
if modules_to_not_convert is None:
lowerCAmelCase : List[Any] = []
lowerCAmelCase , lowerCAmelCase : List[Any] = _replace_with_bnb_layers(
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
if not has_been_replaced:
logger.warning(
"You are loading your model in 8bit or 4bit but no linear modules were found in your model."
" this can happen for some architectures such as gpt2 that uses Conv1D instead of Linear layers."
" Please double check your model architecture, or submit an issue on github if you think this is"
" a bug." )
return model
def a__ ( SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : Dict , SCREAMING_SNAKE_CASE : Any=None , SCREAMING_SNAKE_CASE : str=None , ):
'''simple docstring'''
lowerCAmelCase : List[str] = False
for name, module in model.named_children():
if current_key_name is None:
lowerCAmelCase : Optional[Any] = []
current_key_name.append(SCREAMING_SNAKE_CASE )
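        # Only plain nn.Linear layers are swapped for their bitsandbytes
        # counterparts; modules listed in `modules_to_not_convert` (e.g. the
        # lm_head) are kept in their original precision.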
if isinstance(SCREAMING_SNAKE_CASE , nn.Linear ) and name not in modules_to_not_convert:
# Check if the current key is not in the `modules_to_not_convert`
lowerCAmelCase : List[str] = ".".join(SCREAMING_SNAKE_CASE )
lowerCAmelCase : Any = True
for key in modules_to_not_convert:
if (
(key in current_key_name_str) and (key + "." in current_key_name_str)
) or key == current_key_name_str:
lowerCAmelCase : Optional[int] = False
break
if proceed:
# Load bnb module with empty weight and replace ``nn.Linear` module
if bnb_quantization_config.load_in_abit:
lowerCAmelCase : Union[str, Any] = bnb.nn.LinearabitLt(
module.in_features , module.out_features , module.bias is not None , has_fpaa_weights=SCREAMING_SNAKE_CASE , threshold=bnb_quantization_config.llm_inta_threshold , )
elif bnb_quantization_config.load_in_abit:
lowerCAmelCase : str = bnb.nn.Linearabit(
module.in_features , module.out_features , module.bias is not None , bnb_quantization_config.bnb_abit_compute_dtype , compress_statistics=bnb_quantization_config.bnb_abit_use_double_quant , quant_type=bnb_quantization_config.bnb_abit_quant_type , )
else:
raise ValueError("load_in_8bit and load_in_4bit can't be both False" )
lowerCAmelCase : Tuple = module.weight.data
if module.bias is not None:
lowerCAmelCase : Optional[int] = module.bias.data
bnb_module.requires_grad_(SCREAMING_SNAKE_CASE )
setattr(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
lowerCAmelCase : Dict = True
if len(list(module.children() ) ) > 0:
lowerCAmelCase , lowerCAmelCase : List[str] = _replace_with_bnb_layers(
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
lowerCAmelCase : Dict = has_been_replaced | _has_been_replaced
# Remove the last key for recursion
current_key_name.pop(-1 )
return model, has_been_replaced
def a__ ( SCREAMING_SNAKE_CASE : Tuple ):
'''simple docstring'''
with init_empty_weights():
lowerCAmelCase : List[str] = deepcopy(SCREAMING_SNAKE_CASE ) # this has 0 cost since it is done inside `init_empty_weights` context manager`
lowerCAmelCase : Dict = find_tied_parameters(SCREAMING_SNAKE_CASE )
# For compatibility with Accelerate < 0.18
if isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
lowerCAmelCase : Union[str, Any] = sum(list(tied_params.values() ) , [] ) + list(tied_params.keys() )
else:
lowerCAmelCase : List[Any] = sum(SCREAMING_SNAKE_CASE , [] )
lowerCAmelCase : List[Any] = len(SCREAMING_SNAKE_CASE ) > 0
# Check if it is a base model
lowerCAmelCase : Optional[Any] = False
if hasattr(SCREAMING_SNAKE_CASE , "base_model_prefix" ):
lowerCAmelCase : str = not hasattr(SCREAMING_SNAKE_CASE , model.base_model_prefix )
# Ignore this for base models (BertModel, GPT2Model, etc.)
if (not has_tied_params) and is_base_model:
return []
# otherwise they have an attached head
lowerCAmelCase : Optional[int] = list(model.named_children() )
lowerCAmelCase : Dict = [list_modules[-1][0]]
# add last module together with tied weights
lowerCAmelCase : Optional[Any] = set(SCREAMING_SNAKE_CASE ) - set(SCREAMING_SNAKE_CASE )
lowerCAmelCase : Tuple = list(set(SCREAMING_SNAKE_CASE ) ) + list(SCREAMING_SNAKE_CASE )
# remove ".weight" from the keys
lowerCAmelCase : Optional[Any] = [".weight", ".bias"]
lowerCAmelCase : Optional[int] = []
for name in list_untouched:
for name_to_remove in names_to_remove:
if name_to_remove in name:
lowerCAmelCase : List[Any] = name.replace(SCREAMING_SNAKE_CASE , "" )
filtered_module_names.append(SCREAMING_SNAKE_CASE )
return filtered_module_names
def a__ ( SCREAMING_SNAKE_CASE : Dict ):
'''simple docstring'''
for m in model.modules():
if isinstance(SCREAMING_SNAKE_CASE , bnb.nn.Linearabit ):
return True
return False
def a__ ( SCREAMING_SNAKE_CASE : nn.Module ):
'''simple docstring'''
return next(parameter.parameters() ).device
def a__ ( SCREAMING_SNAKE_CASE : Dict , SCREAMING_SNAKE_CASE : Tuple , SCREAMING_SNAKE_CASE : List[str] , SCREAMING_SNAKE_CASE : List[Any] , SCREAMING_SNAKE_CASE : Dict , SCREAMING_SNAKE_CASE : List[str] , SCREAMING_SNAKE_CASE : Dict ):
'''simple docstring'''
if fpaa_statistics is None:
set_module_tensor_to_device(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , 0 , dtype=SCREAMING_SNAKE_CASE , value=SCREAMING_SNAKE_CASE )
lowerCAmelCase : Tuple = param_name
lowerCAmelCase : Dict = model
if "." in tensor_name:
lowerCAmelCase : int = tensor_name.split("." )
for split in splits[:-1]:
lowerCAmelCase : Union[str, Any] = getattr(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
if new_module is None:
raise ValueError(f"""{module} has no attribute {split}.""" )
lowerCAmelCase : Optional[Any] = new_module
lowerCAmelCase : Tuple = splits[-1]
# offload weights
lowerCAmelCase : Dict = False
offload_weight(module._parameters[tensor_name] , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , index=SCREAMING_SNAKE_CASE )
if hasattr(module._parameters[tensor_name] , "SCB" ):
offload_weight(
module._parameters[tensor_name].SCB , param_name.replace("weight" , "SCB" ) , SCREAMING_SNAKE_CASE , index=SCREAMING_SNAKE_CASE , )
else:
offload_weight(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , index=SCREAMING_SNAKE_CASE )
offload_weight(SCREAMING_SNAKE_CASE , param_name.replace("weight" , "SCB" ) , SCREAMING_SNAKE_CASE , index=SCREAMING_SNAKE_CASE )
set_module_tensor_to_device(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , "meta" , dtype=SCREAMING_SNAKE_CASE , value=torch.empty(*param.size() ) )
| 681 |
"""simple docstring"""
import argparse
import csv
import logging
import os
import random
import numpy as np
import torch
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler, TensorDataset
from tqdm import tqdm, trange
from transformers import (
CONFIG_NAME,
WEIGHTS_NAME,
AdamW,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTTokenizer,
get_linear_schedule_with_warmup,
)
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''', datefmt='''%m/%d/%Y %H:%M:%S''', level=logging.INFO
)
lowerCAmelCase__ = logging.getLogger(__name__)
def a__ ( SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : Optional[int] ):
'''simple docstring'''
lowerCAmelCase : Optional[Any] = np.argmax(SCREAMING_SNAKE_CASE , axis=1 )
return np.sum(outputs == labels )
def a__ ( SCREAMING_SNAKE_CASE : int ):
'''simple docstring'''
with open(SCREAMING_SNAKE_CASE , encoding="utf_8" ) as f:
lowerCAmelCase : Tuple = csv.reader(SCREAMING_SNAKE_CASE )
lowerCAmelCase : Tuple = []
next(SCREAMING_SNAKE_CASE ) # skip the first line
for line in tqdm(SCREAMING_SNAKE_CASE ):
output.append((" ".join(line[1:5] ), line[5], line[6], int(line[-1] ) - 1) )
return output
def a__ ( SCREAMING_SNAKE_CASE : Optional[int] , SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : List[str] , SCREAMING_SNAKE_CASE : str ):
'''simple docstring'''
lowerCAmelCase : List[Any] = []
for dataset in encoded_datasets:
lowerCAmelCase : Dict = len(SCREAMING_SNAKE_CASE )
lowerCAmelCase : str = np.zeros((n_batch, 2, input_len) , dtype=np.intaa )
lowerCAmelCase : Tuple = np.zeros((n_batch, 2) , dtype=np.intaa )
lowerCAmelCase : int = np.full((n_batch, 2, input_len) , fill_value=-1_0_0 , dtype=np.intaa )
lowerCAmelCase : List[Any] = np.zeros((n_batch,) , dtype=np.intaa )
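        # Each example is encoded twice, once per candidate ending, as
        # [start] story [delimiter] continuation [clf]; mc_token_ids records the
        # position of the [clf] token whose hidden state feeds the multiple-choice head.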
for (
i,
(story, conta, conta, mc_label),
) in enumerate(SCREAMING_SNAKE_CASE ):
lowerCAmelCase : int = [start_token] + story[:cap_length] + [delimiter_token] + conta[:cap_length] + [clf_token]
lowerCAmelCase : Union[str, Any] = [start_token] + story[:cap_length] + [delimiter_token] + conta[:cap_length] + [clf_token]
lowerCAmelCase : Tuple = with_conta
lowerCAmelCase : Any = with_conta
lowerCAmelCase : Optional[Any] = len(SCREAMING_SNAKE_CASE ) - 1
lowerCAmelCase : Dict = len(SCREAMING_SNAKE_CASE ) - 1
lowerCAmelCase : Optional[Any] = with_conta
lowerCAmelCase : List[Any] = with_conta
lowerCAmelCase : str = mc_label
lowerCAmelCase : Dict = (input_ids, mc_token_ids, lm_labels, mc_labels)
tensor_datasets.append(tuple(torch.tensor(SCREAMING_SNAKE_CASE ) for t in all_inputs ) )
return tensor_datasets
def a__ ( ):
'''simple docstring'''
lowerCAmelCase : Optional[int] = argparse.ArgumentParser()
parser.add_argument("--model_name" , type=SCREAMING_SNAKE_CASE , default="openai-gpt" , help="pretrained model name" )
parser.add_argument("--do_train" , action="store_true" , help="Whether to run training." )
parser.add_argument("--do_eval" , action="store_true" , help="Whether to run eval on the dev set." )
parser.add_argument(
"--output_dir" , default=SCREAMING_SNAKE_CASE , type=SCREAMING_SNAKE_CASE , required=SCREAMING_SNAKE_CASE , help="The output directory where the model predictions and checkpoints will be written." , )
parser.add_argument("--train_dataset" , type=SCREAMING_SNAKE_CASE , default="" )
parser.add_argument("--eval_dataset" , type=SCREAMING_SNAKE_CASE , default="" )
parser.add_argument("--seed" , type=SCREAMING_SNAKE_CASE , default=4_2 )
parser.add_argument("--num_train_epochs" , type=SCREAMING_SNAKE_CASE , default=3 )
parser.add_argument("--train_batch_size" , type=SCREAMING_SNAKE_CASE , default=8 )
parser.add_argument("--eval_batch_size" , type=SCREAMING_SNAKE_CASE , default=1_6 )
parser.add_argument("--adam_epsilon" , default=1E-8 , type=SCREAMING_SNAKE_CASE , help="Epsilon for Adam optimizer." )
parser.add_argument("--max_grad_norm" , type=SCREAMING_SNAKE_CASE , default=1 )
parser.add_argument(
"--max_steps" , default=-1 , type=SCREAMING_SNAKE_CASE , help=(
"If > 0: set total number of training steps to perform. Override num_train_epochs."
) , )
parser.add_argument(
"--gradient_accumulation_steps" , type=SCREAMING_SNAKE_CASE , default=1 , help="Number of updates steps to accumulate before performing a backward/update pass." , )
parser.add_argument("--learning_rate" , type=SCREAMING_SNAKE_CASE , default=6.2_5E-5 )
parser.add_argument("--warmup_steps" , default=0 , type=SCREAMING_SNAKE_CASE , help="Linear warmup over warmup_steps." )
parser.add_argument("--lr_schedule" , type=SCREAMING_SNAKE_CASE , default="warmup_linear" )
parser.add_argument("--weight_decay" , type=SCREAMING_SNAKE_CASE , default=0.01 )
parser.add_argument("--lm_coef" , type=SCREAMING_SNAKE_CASE , default=0.9 )
parser.add_argument("--n_valid" , type=SCREAMING_SNAKE_CASE , default=3_7_4 )
parser.add_argument("--server_ip" , type=SCREAMING_SNAKE_CASE , default="" , help="Can be used for distant debugging." )
parser.add_argument("--server_port" , type=SCREAMING_SNAKE_CASE , default="" , help="Can be used for distant debugging." )
lowerCAmelCase : Tuple = parser.parse_args()
print(SCREAMING_SNAKE_CASE )
if args.server_ip and args.server_port:
# Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
import ptvsd
print("Waiting for debugger attach" )
ptvsd.enable_attach(address=(args.server_ip, args.server_port) , redirect_output=SCREAMING_SNAKE_CASE )
ptvsd.wait_for_attach()
random.seed(args.seed )
np.random.seed(args.seed )
torch.manual_seed(args.seed )
torch.cuda.manual_seed_all(args.seed )
lowerCAmelCase : Optional[int] = torch.device("cuda" if torch.cuda.is_available() else "cpu" )
lowerCAmelCase : Optional[int] = torch.cuda.device_count()
logger.info("device: {}, n_gpu {}".format(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) )
if not args.do_train and not args.do_eval:
raise ValueError("At least one of `do_train` or `do_eval` must be True." )
if not os.path.exists(args.output_dir ):
os.makedirs(args.output_dir )
# Load tokenizer and model
# This loading functions also add new tokens and embeddings called `special tokens`
# These new embeddings will be fine-tuned on the RocStories dataset
lowerCAmelCase : str = ["_start_", "_delimiter_", "_classify_"]
lowerCAmelCase : Dict = OpenAIGPTTokenizer.from_pretrained(args.model_name )
tokenizer.add_tokens(SCREAMING_SNAKE_CASE )
lowerCAmelCase : Dict = tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE )
lowerCAmelCase : Union[str, Any] = OpenAIGPTDoubleHeadsModel.from_pretrained(args.model_name )
model.resize_token_embeddings(len(SCREAMING_SNAKE_CASE ) )
model.to(SCREAMING_SNAKE_CASE )
# Load and encode the datasets
def tokenize_and_encode(SCREAMING_SNAKE_CASE : Optional[Any] ):
if isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
return tokenizer.convert_tokens_to_ids(tokenizer.tokenize(SCREAMING_SNAKE_CASE ) )
elif isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
return obj
return [tokenize_and_encode(SCREAMING_SNAKE_CASE ) for o in obj]
logger.info("Encoding dataset..." )
lowerCAmelCase : Optional[Any] = load_rocstories_dataset(args.train_dataset )
lowerCAmelCase : int = load_rocstories_dataset(args.eval_dataset )
lowerCAmelCase : Tuple = (train_dataset, eval_dataset)
lowerCAmelCase : Dict = tokenize_and_encode(SCREAMING_SNAKE_CASE )
# Compute the max input length for the Transformer
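    # Cap each segment so that story + continuation + the three special tokens
    # still fit within the model's n_positions context window.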
lowerCAmelCase : Any = model.config.n_positions // 2 - 2
lowerCAmelCase : int = max(
len(story[:max_length] ) + max(len(conta[:max_length] ) , len(conta[:max_length] ) ) + 3
for dataset in encoded_datasets
for story, conta, conta, _ in dataset )
lowerCAmelCase : Union[str, Any] = min(SCREAMING_SNAKE_CASE , model.config.n_positions ) # Max size of input for the pre-trained model
# Prepare inputs tensors and dataloaders
lowerCAmelCase : Any = pre_process_datasets(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , *SCREAMING_SNAKE_CASE )
lowerCAmelCase , lowerCAmelCase : Tuple = tensor_datasets[0], tensor_datasets[1]
lowerCAmelCase : List[str] = TensorDataset(*SCREAMING_SNAKE_CASE )
lowerCAmelCase : Union[str, Any] = RandomSampler(SCREAMING_SNAKE_CASE )
lowerCAmelCase : Tuple = DataLoader(SCREAMING_SNAKE_CASE , sampler=SCREAMING_SNAKE_CASE , batch_size=args.train_batch_size )
lowerCAmelCase : int = TensorDataset(*SCREAMING_SNAKE_CASE )
lowerCAmelCase : Dict = SequentialSampler(SCREAMING_SNAKE_CASE )
lowerCAmelCase : Tuple = DataLoader(SCREAMING_SNAKE_CASE , sampler=SCREAMING_SNAKE_CASE , batch_size=args.eval_batch_size )
# Prepare optimizer
if args.do_train:
if args.max_steps > 0:
lowerCAmelCase : int = args.max_steps
lowerCAmelCase : str = args.max_steps // (len(SCREAMING_SNAKE_CASE ) // args.gradient_accumulation_steps) + 1
else:
lowerCAmelCase : Union[str, Any] = len(SCREAMING_SNAKE_CASE ) // args.gradient_accumulation_steps * args.num_train_epochs
lowerCAmelCase : Dict = list(model.named_parameters() )
lowerCAmelCase : str = ["bias", "LayerNorm.bias", "LayerNorm.weight"]
lowerCAmelCase : Tuple = [
{
"params": [p for n, p in param_optimizer if not any(nd in n for nd in no_decay )],
"weight_decay": args.weight_decay,
},
{"params": [p for n, p in param_optimizer if any(nd in n for nd in no_decay )], "weight_decay": 0.0},
]
lowerCAmelCase : Tuple = AdamW(SCREAMING_SNAKE_CASE , lr=args.learning_rate , eps=args.adam_epsilon )
lowerCAmelCase : str = get_linear_schedule_with_warmup(
SCREAMING_SNAKE_CASE , num_warmup_steps=args.warmup_steps , num_training_steps=SCREAMING_SNAKE_CASE )
if args.do_train:
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase : Dict = 0, 0, None
model.train()
for _ in trange(int(args.num_train_epochs ) , desc="Epoch" ):
lowerCAmelCase : Optional[int] = 0
lowerCAmelCase : Union[str, Any] = 0
lowerCAmelCase : Tuple = tqdm(SCREAMING_SNAKE_CASE , desc="Training" )
for step, batch in enumerate(SCREAMING_SNAKE_CASE ):
lowerCAmelCase : Tuple = tuple(t.to(SCREAMING_SNAKE_CASE ) for t in batch )
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase : int = batch
lowerCAmelCase : Optional[int] = model(SCREAMING_SNAKE_CASE , mc_token_ids=SCREAMING_SNAKE_CASE , lm_labels=SCREAMING_SNAKE_CASE , mc_labels=SCREAMING_SNAKE_CASE )
lowerCAmelCase : int = args.lm_coef * losses[0] + losses[1]
loss.backward()
optimizer.step()
scheduler.step()
optimizer.zero_grad()
tr_loss += loss.item()
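                # Track an exponential moving average of the loss (0.7 old / 0.3 new) for the progress bar.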
lowerCAmelCase : Optional[Any] = (
loss.item() if exp_average_loss is None else 0.7 * exp_average_loss + 0.3 * loss.item()
)
nb_tr_steps += 1
lowerCAmelCase : int = "Training loss: {:.2e} lr: {:.2e}".format(SCREAMING_SNAKE_CASE , scheduler.get_lr()[0] )
# Save a trained model
if args.do_train:
# Save a trained model, configuration and tokenizer
lowerCAmelCase : Optional[int] = model.module if hasattr(SCREAMING_SNAKE_CASE , "module" ) else model # Only save the model itself
# If we save using the predefined names, we can load using `from_pretrained`
lowerCAmelCase : Any = os.path.join(args.output_dir , SCREAMING_SNAKE_CASE )
lowerCAmelCase : Dict = os.path.join(args.output_dir , SCREAMING_SNAKE_CASE )
torch.save(model_to_save.state_dict() , SCREAMING_SNAKE_CASE )
model_to_save.config.to_json_file(SCREAMING_SNAKE_CASE )
tokenizer.save_vocabulary(args.output_dir )
# Load a trained model and vocabulary that you have fine-tuned
lowerCAmelCase : List[str] = OpenAIGPTDoubleHeadsModel.from_pretrained(args.output_dir )
lowerCAmelCase : Dict = OpenAIGPTTokenizer.from_pretrained(args.output_dir )
model.to(SCREAMING_SNAKE_CASE )
if args.do_eval:
model.eval()
lowerCAmelCase , lowerCAmelCase : Optional[int] = 0, 0
lowerCAmelCase , lowerCAmelCase : Any = 0, 0
for batch in tqdm(SCREAMING_SNAKE_CASE , desc="Evaluating" ):
lowerCAmelCase : List[Any] = tuple(t.to(SCREAMING_SNAKE_CASE ) for t in batch )
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase : Tuple = batch
with torch.no_grad():
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase : Optional[Any] = model(
SCREAMING_SNAKE_CASE , mc_token_ids=SCREAMING_SNAKE_CASE , lm_labels=SCREAMING_SNAKE_CASE , mc_labels=SCREAMING_SNAKE_CASE )
lowerCAmelCase : List[str] = mc_logits.detach().cpu().numpy()
lowerCAmelCase : List[str] = mc_labels.to("cpu" ).numpy()
lowerCAmelCase : Any = accuracy(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
eval_loss += mc_loss.mean().item()
eval_accuracy += tmp_eval_accuracy
nb_eval_examples += input_ids.size(0 )
nb_eval_steps += 1
lowerCAmelCase : List[Any] = eval_loss / nb_eval_steps
lowerCAmelCase : List[Any] = eval_accuracy / nb_eval_examples
lowerCAmelCase : Tuple = tr_loss / nb_tr_steps if args.do_train else None
lowerCAmelCase : Any = {"eval_loss": eval_loss, "eval_accuracy": eval_accuracy, "train_loss": train_loss}
lowerCAmelCase : List[str] = os.path.join(args.output_dir , "eval_results.txt" )
with open(SCREAMING_SNAKE_CASE , "w" ) as writer:
logger.info("***** Eval results *****" )
for key in sorted(result.keys() ):
logger.info(" %s = %s" , SCREAMING_SNAKE_CASE , str(result[key] ) )
writer.write("%s = %s\n" % (key, str(result[key] )) )
if __name__ == "__main__":
main()
| 681 | 1 |
"""simple docstring"""
import pytest
import requests
from datasets.utils.file_utils import http_head
from .utils import OfflineSimulationMode, RequestWouldHangIndefinitelyError, offline
@pytest.mark.integration
def a__ ( ):
'''simple docstring'''
with offline(OfflineSimulationMode.CONNECTION_TIMES_OUT ):
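        # Without an explicit timeout the simulated connection hangs, raising RequestWouldHangIndefinitelyError.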
with pytest.raises(SCREAMING_SNAKE_CASE ):
requests.request("GET" , "https://huggingface.co" )
with pytest.raises(requests.exceptions.ConnectTimeout ):
requests.request("GET" , "https://huggingface.co" , timeout=1.0 )
@pytest.mark.integration
def a__ ( ):
'''simple docstring'''
with offline(OfflineSimulationMode.CONNECTION_FAILS ):
with pytest.raises(requests.exceptions.ConnectionError ):
requests.request("GET" , "https://huggingface.co" )
def a__ ( ):
'''simple docstring'''
with offline(OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1 ):
with pytest.raises(SCREAMING_SNAKE_CASE ):
http_head("https://huggingface.co" )
| 681 |
"""simple docstring"""
from typing import List, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {
'''huggingface/informer-tourism-monthly''': (
'''https://huggingface.co/huggingface/informer-tourism-monthly/resolve/main/config.json'''
),
# See all Informer models at https://huggingface.co/models?filter=informer
}
class SCREAMING_SNAKE_CASE__ ( lowercase ):
"""simple docstring"""
a : Optional[Any] ="informer"
a : int ={
"hidden_size": "d_model",
"num_attention_heads": "encoder_attention_heads",
"num_hidden_layers": "encoder_layers",
}
def __init__( self , snake_case__ = None , snake_case__ = None , snake_case__ = "student_t" , snake_case__ = "nll" , snake_case__ = 1 , snake_case__ = None , snake_case__ = "mean" , snake_case__ = 0 , snake_case__ = 0 , snake_case__ = 0 , snake_case__ = 0 , snake_case__ = None , snake_case__ = None , snake_case__ = 64 , snake_case__ = 32 , snake_case__ = 32 , snake_case__ = 2 , snake_case__ = 2 , snake_case__ = 2 , snake_case__ = 2 , snake_case__ = True , snake_case__ = "gelu" , snake_case__ = 0.05 , snake_case__ = 0.1 , snake_case__ = 0.1 , snake_case__ = 0.1 , snake_case__ = 0.1 , snake_case__ = 100 , snake_case__ = 0.02 , snake_case__=True , snake_case__ = "prob" , snake_case__ = 5 , snake_case__ = True , **snake_case__ , ):
"""simple docstring"""
lowerCAmelCase : Union[str, Any] = prediction_length
lowerCAmelCase : Union[str, Any] = context_length or prediction_length
lowerCAmelCase : List[Any] = distribution_output
lowerCAmelCase : Optional[int] = loss
lowerCAmelCase : Optional[int] = input_size
lowerCAmelCase : str = num_time_features
lowerCAmelCase : Any = lags_sequence if lags_sequence is not None else [1, 2, 3, 4, 5, 6, 7]
lowerCAmelCase : Dict = scaling
lowerCAmelCase : List[str] = num_dynamic_real_features
lowerCAmelCase : Dict = num_static_real_features
lowerCAmelCase : Dict = num_static_categorical_features
# set cardinality
if cardinality and num_static_categorical_features > 0:
if len(snake_case__ ) != num_static_categorical_features:
raise ValueError(
"The cardinality should be a list of the same length as `num_static_categorical_features`" )
lowerCAmelCase : List[str] = cardinality
else:
lowerCAmelCase : Optional[Any] = [0]
# set embedding_dimension
if embedding_dimension and num_static_categorical_features > 0:
if len(snake_case__ ) != num_static_categorical_features:
raise ValueError(
"The embedding dimension should be a list of the same length as `num_static_categorical_features`" )
lowerCAmelCase : List[Any] = embedding_dimension
else:
lowerCAmelCase : Dict = [min(50 , (cat + 1) // 2 ) for cat in self.cardinality]
lowerCAmelCase : List[Any] = num_parallel_samples
# Transformer architecture configuration
lowerCAmelCase : Any = input_size * len(self.lags_sequence ) + self._number_of_features
lowerCAmelCase : str = d_model
lowerCAmelCase : List[str] = encoder_attention_heads
lowerCAmelCase : int = decoder_attention_heads
lowerCAmelCase : Optional[Any] = encoder_ffn_dim
lowerCAmelCase : Dict = decoder_ffn_dim
lowerCAmelCase : int = encoder_layers
lowerCAmelCase : Union[str, Any] = decoder_layers
lowerCAmelCase : Tuple = dropout
lowerCAmelCase : List[Any] = attention_dropout
lowerCAmelCase : int = activation_dropout
lowerCAmelCase : Union[str, Any] = encoder_layerdrop
lowerCAmelCase : int = decoder_layerdrop
lowerCAmelCase : Optional[int] = activation_function
lowerCAmelCase : int = init_std
lowerCAmelCase : Optional[Any] = use_cache
# Informer
lowerCAmelCase : Dict = attention_type
lowerCAmelCase : Any = sampling_factor
lowerCAmelCase : Optional[int] = distil
super().__init__(is_encoder_decoder=snake_case__ , **snake_case__ )
@property
def lowercase__ ( self ):
"""simple docstring"""
return (
sum(self.embedding_dimension )
+ self.num_dynamic_real_features
+ self.num_time_features
+ self.num_static_real_features
+ self.input_size * 2 # the log1p(abs(loc)) and log(scale) features
)
| 681 | 1 |
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_deit import DeiTImageProcessor
lowerCAmelCase__ = logging.get_logger(__name__)
class SCREAMING_SNAKE_CASE__ ( lowercase ):
"""simple docstring"""
def __init__( self , *snake_case__ , **snake_case__ ):
"""simple docstring"""
warnings.warn(
"The class DeiTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
" use DeiTImageProcessor instead." , snake_case__ , )
super().__init__(*snake_case__ , **snake_case__ )
| 681 |
"""simple docstring"""
def a__ ( SCREAMING_SNAKE_CASE : int ):
'''simple docstring'''
if num < 0:
return False
lowerCAmelCase : int = num
lowerCAmelCase : int = 0
while num > 0:
lowerCAmelCase : Dict = rev_num * 1_0 + (num % 1_0)
num //= 1_0
return num_copy == rev_num
if __name__ == "__main__":
import doctest
doctest.testmod()
| 681 | 1 |
"""simple docstring"""
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pathlib import Path
import torch
from ...utils import is_npu_available, is_xpu_available
from .config_args import ClusterConfig, default_json_config_file
from .config_utils import SubcommandHelpFormatter
lowerCAmelCase__ = '''Create a default config file for Accelerate with only a few flags set.'''
def a__ ( SCREAMING_SNAKE_CASE : Any="no" , SCREAMING_SNAKE_CASE : str = default_json_config_file , SCREAMING_SNAKE_CASE : bool = False ):
'''simple docstring'''
lowerCAmelCase : Union[str, Any] = Path(SCREAMING_SNAKE_CASE )
path.parent.mkdir(parents=SCREAMING_SNAKE_CASE , exist_ok=SCREAMING_SNAKE_CASE )
if path.exists():
print(
f"""Configuration already exists at {save_location}, will not override. Run `accelerate config` manually or pass a different `save_location`.""" )
return False
lowerCAmelCase : int = mixed_precision.lower()
if mixed_precision not in ["no", "fp16", "bf16", "fp8"]:
raise ValueError(
f"""`mixed_precision` should be one of 'no', 'fp16', 'bf16', or 'fp8'. Received {mixed_precision}""" )
lowerCAmelCase : Optional[Any] = {
"compute_environment": "LOCAL_MACHINE",
"mixed_precision": mixed_precision,
}
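    # Detect the available accelerator (CUDA, then XPU, then NPU, else CPU) and choose the distributed type accordingly.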
if torch.cuda.is_available():
lowerCAmelCase : List[Any] = torch.cuda.device_count()
lowerCAmelCase : Optional[int] = num_gpus
lowerCAmelCase : Optional[Any] = False
if num_gpus > 1:
lowerCAmelCase : str = "MULTI_GPU"
else:
lowerCAmelCase : Union[str, Any] = "NO"
elif is_xpu_available() and use_xpu:
lowerCAmelCase : List[Any] = torch.xpu.device_count()
lowerCAmelCase : Optional[int] = num_xpus
lowerCAmelCase : List[str] = False
if num_xpus > 1:
lowerCAmelCase : Tuple = "MULTI_XPU"
else:
lowerCAmelCase : Dict = "NO"
elif is_npu_available():
lowerCAmelCase : List[Any] = torch.npu.device_count()
lowerCAmelCase : str = num_npus
lowerCAmelCase : Any = False
if num_npus > 1:
lowerCAmelCase : Optional[int] = "MULTI_NPU"
else:
lowerCAmelCase : Tuple = "NO"
else:
lowerCAmelCase : Tuple = 0
lowerCAmelCase : Any = True
lowerCAmelCase : Union[str, Any] = 1
lowerCAmelCase : Optional[int] = "NO"
lowerCAmelCase : Optional[Any] = ClusterConfig(**SCREAMING_SNAKE_CASE )
config.to_json_file(SCREAMING_SNAKE_CASE )
return path
def a__ ( SCREAMING_SNAKE_CASE : Any , SCREAMING_SNAKE_CASE : List[Any] ):
'''simple docstring'''
lowerCAmelCase : Dict = parser.add_parser("default" , parents=SCREAMING_SNAKE_CASE , help=SCREAMING_SNAKE_CASE , formatter_class=SCREAMING_SNAKE_CASE )
parser.add_argument(
"--config_file" , default=SCREAMING_SNAKE_CASE , help=(
"The path to use to store the config file. Will default to a file named default_config.yaml in the cache "
"location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have "
"such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed "
"with 'huggingface'."
) , dest="save_location" , )
parser.add_argument(
"--mixed_precision" , choices=["no", "fp16", "bf16"] , type=SCREAMING_SNAKE_CASE , help="Whether or not to use mixed precision training. "
"Choose between FP16 and BF16 (bfloat16) training. "
"BF16 training is only supported on Nvidia Ampere GPUs and PyTorch 1.10 or later." , default="no" , )
parser.set_defaults(func=SCREAMING_SNAKE_CASE )
return parser
def a__ ( SCREAMING_SNAKE_CASE : str ):
'''simple docstring'''
lowerCAmelCase : List[str] = write_basic_config(args.mixed_precision , args.save_location )
if config_file:
print(f"""accelerate configuration saved at {config_file}""" )
| 681 |
"""simple docstring"""
import os
import re
import shutil
import sys
import tempfile
import unittest
import black
lowerCAmelCase__ = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, '''utils'''))
import check_copies # noqa: E402
# This is the reference code that will be used in the tests.
# If DDPMSchedulerOutput is changed in scheduling_ddpm.py, this code needs to be manually updated.
lowerCAmelCase__ = ''' \"""
Output class for the scheduler\'s step function output.
Args:
prev_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):
Computed sample (x_{t-1}) of previous timestep. `prev_sample` should be used as next model input in the
denoising loop.
pred_original_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):
The predicted denoised sample (x_{0}) based on the model output from the current timestep.
`pred_original_sample` can be used to preview progress or for guidance.
\"""
prev_sample: torch.FloatTensor
pred_original_sample: Optional[torch.FloatTensor] = None
'''
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
"""simple docstring"""
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : int = tempfile.mkdtemp()
os.makedirs(os.path.join(self.diffusers_dir , "schedulers/" ) )
lowerCAmelCase : List[str] = self.diffusers_dir
shutil.copy(
os.path.join(snake_case__ , "src/diffusers/schedulers/scheduling_ddpm.py" ) , os.path.join(self.diffusers_dir , "schedulers/scheduling_ddpm.py" ) , )
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : Dict = "src/diffusers"
shutil.rmtree(self.diffusers_dir )
def lowercase__ ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__=None ):
"""simple docstring"""
lowerCAmelCase : Union[str, Any] = comment + f"""\nclass {class_name}(nn.Module):\n""" + class_code
if overwrite_result is not None:
lowerCAmelCase : str = comment + f"""\nclass {class_name}(nn.Module):\n""" + overwrite_result
lowerCAmelCase : int = black.Mode(target_versions={black.TargetVersion.PYaa} , line_length=119 )
lowerCAmelCase : int = black.format_str(snake_case__ , mode=snake_case__ )
lowerCAmelCase : Dict = os.path.join(self.diffusers_dir , "new_code.py" )
with open(snake_case__ , "w" , newline="\n" ) as f:
f.write(snake_case__ )
if overwrite_result is None:
self.assertTrue(len(check_copies.is_copy_consistent(snake_case__ ) ) == 0 )
else:
check_copies.is_copy_consistent(f.name , overwrite=snake_case__ )
with open(snake_case__ , "r" ) as f:
self.assertTrue(f.read() , snake_case__ )
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : List[str] = check_copies.find_code_in_diffusers("schedulers.scheduling_ddpm.DDPMSchedulerOutput" )
self.assertEqual(snake_case__ , snake_case__ )
def lowercase__ ( self ):
"""simple docstring"""
self.check_copy_consistency(
"# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput" , "DDPMSchedulerOutput" , REFERENCE_CODE + "\n" , )
# With no empty line at the end
self.check_copy_consistency(
"# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput" , "DDPMSchedulerOutput" , snake_case__ , )
# Copy consistency with rename
self.check_copy_consistency(
"# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test" , "TestSchedulerOutput" , re.sub("DDPM" , "Test" , snake_case__ ) , )
# Copy consistency with a really long name
lowerCAmelCase : Union[str, Any] = "TestClassWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason"
self.check_copy_consistency(
f"""# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->{long_class_name}""" , f"""{long_class_name}SchedulerOutput""" , re.sub("Bert" , snake_case__ , snake_case__ ) , )
# Copy consistency with overwrite
self.check_copy_consistency(
"# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test" , "TestSchedulerOutput" , snake_case__ , overwrite_result=re.sub("DDPM" , "Test" , snake_case__ ) , )
| 681 | 1 |
"""simple docstring"""
from __future__ import annotations
def a__ ( SCREAMING_SNAKE_CASE : list[int] , SCREAMING_SNAKE_CASE : int ):
'''simple docstring'''
lowerCAmelCase : str = 0
lowerCAmelCase : Any = len(SCREAMING_SNAKE_CASE ) - 1
while i < j:
if nums[i] + nums[j] == target:
return [i, j]
elif nums[i] + nums[j] < target:
lowerCAmelCase : Optional[Any] = i + 1
else:
lowerCAmelCase : Dict = j - 1
return []
if __name__ == "__main__":
import doctest
doctest.testmod()
print(F"{two_pointer([2, 7, 11, 15], 9) = }")
| 681 |
"""simple docstring"""
import re
from flax.core.frozen_dict import freeze
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.experimental import PartitionSpec as P
# Sentinels
lowerCAmelCase__ = object()
# For specifying empty leaf dict `{}`
lowerCAmelCase__ = object()
def a__ ( SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : Optional[Any] ):
'''simple docstring'''
lowerCAmelCase : Optional[int] = tuple((re.compile(x + "$" ) for x in qs) )
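    # Slide the compiled patterns over every window of the key tuple; a fully matching window means the rule applies.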
for i in range(len(SCREAMING_SNAKE_CASE ) - len(SCREAMING_SNAKE_CASE ) + 1 ):
lowerCAmelCase : int = [x.match(SCREAMING_SNAKE_CASE ) for x, y in zip(SCREAMING_SNAKE_CASE , ks[i:] )]
if matches and all(SCREAMING_SNAKE_CASE ):
return True
return False
def a__ ( SCREAMING_SNAKE_CASE : int ):
'''simple docstring'''
def replace(SCREAMING_SNAKE_CASE : Tuple , SCREAMING_SNAKE_CASE : Dict ):
for rule, replacement in rules:
if _match(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
return replacement
return val
return replace
def a__ ( ):
'''simple docstring'''
return [
# embeddings
(("transformer", "wpe", "embedding"), P("mp" , SCREAMING_SNAKE_CASE )),
(("transformer", "wte", "embedding"), P("mp" , SCREAMING_SNAKE_CASE )),
        # attention
(("attention", "(q_proj|k_proj|v_proj)", "kernel"), P(SCREAMING_SNAKE_CASE , "mp" )),
(("attention", "out_proj", "kernel"), P("mp" , SCREAMING_SNAKE_CASE )),
(("attention", "out_proj", "bias"), None),
# mlp
(("mlp", "c_fc", "kernel"), P(SCREAMING_SNAKE_CASE , "mp" )),
(("mlp", "c_fc", "bias"), P("mp" )),
(("mlp", "c_proj", "kernel"), P("mp" , SCREAMING_SNAKE_CASE )),
(("mlp", "c_proj", "bias"), None),
# layer norms
((r"ln_\d+", "bias"), None),
((r"\d+", r"ln_\d+", "scale"), None),
(("ln_f", "bias"), None),
(("ln_f", "scale"), None),
]
def a__ ( SCREAMING_SNAKE_CASE : Union[str, Any] ):
'''simple docstring'''
lowerCAmelCase : Any = _get_partition_rules()
lowerCAmelCase : Tuple = _replacement_rules(SCREAMING_SNAKE_CASE )
lowerCAmelCase : List[str] = {k: _unmatched for k in flatten_dict(SCREAMING_SNAKE_CASE )}
lowerCAmelCase : List[Any] = {k: replace(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) for k, v in initd.items()}
assert _unmatched not in result.values(), "Incomplete partition spec."
return freeze(unflatten_dict(SCREAMING_SNAKE_CASE ) )
| 681 | 1 |
"""simple docstring"""
import os
import re
import shutil
import sys
import tempfile
import unittest
import black
lowerCAmelCase__ = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, '''utils'''))
import check_copies # noqa: E402
# This is the reference code that will be used in the tests.
# If DDPMSchedulerOutput is changed in scheduling_ddpm.py, this code needs to be manually updated.
lowerCAmelCase__ = ''' \"""
Output class for the scheduler\'s step function output.
Args:
prev_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):
Computed sample (x_{t-1}) of previous timestep. `prev_sample` should be used as next model input in the
denoising loop.
pred_original_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):
The predicted denoised sample (x_{0}) based on the model output from the current timestep.
`pred_original_sample` can be used to preview progress or for guidance.
\"""
prev_sample: torch.FloatTensor
pred_original_sample: Optional[torch.FloatTensor] = None
'''
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
"""simple docstring"""
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : int = tempfile.mkdtemp()
os.makedirs(os.path.join(self.diffusers_dir , "schedulers/" ) )
lowerCAmelCase : List[str] = self.diffusers_dir
shutil.copy(
os.path.join(snake_case__ , "src/diffusers/schedulers/scheduling_ddpm.py" ) , os.path.join(self.diffusers_dir , "schedulers/scheduling_ddpm.py" ) , )
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : Dict = "src/diffusers"
shutil.rmtree(self.diffusers_dir )
def lowercase__ ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__=None ):
"""simple docstring"""
lowerCAmelCase : Union[str, Any] = comment + f"""\nclass {class_name}(nn.Module):\n""" + class_code
if overwrite_result is not None:
lowerCAmelCase : str = comment + f"""\nclass {class_name}(nn.Module):\n""" + overwrite_result
lowerCAmelCase : int = black.Mode(target_versions={black.TargetVersion.PYaa} , line_length=119 )
lowerCAmelCase : int = black.format_str(snake_case__ , mode=snake_case__ )
lowerCAmelCase : Dict = os.path.join(self.diffusers_dir , "new_code.py" )
with open(snake_case__ , "w" , newline="\n" ) as f:
f.write(snake_case__ )
if overwrite_result is None:
self.assertTrue(len(check_copies.is_copy_consistent(snake_case__ ) ) == 0 )
else:
check_copies.is_copy_consistent(f.name , overwrite=snake_case__ )
with open(snake_case__ , "r" ) as f:
self.assertTrue(f.read() , snake_case__ )
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : List[str] = check_copies.find_code_in_diffusers("schedulers.scheduling_ddpm.DDPMSchedulerOutput" )
self.assertEqual(snake_case__ , snake_case__ )
def lowercase__ ( self ):
"""simple docstring"""
self.check_copy_consistency(
"# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput" , "DDPMSchedulerOutput" , REFERENCE_CODE + "\n" , )
# With no empty line at the end
self.check_copy_consistency(
"# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput" , "DDPMSchedulerOutput" , snake_case__ , )
# Copy consistency with rename
self.check_copy_consistency(
"# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test" , "TestSchedulerOutput" , re.sub("DDPM" , "Test" , snake_case__ ) , )
# Copy consistency with a really long name
lowerCAmelCase : Union[str, Any] = "TestClassWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason"
self.check_copy_consistency(
f"""# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->{long_class_name}""" , f"""{long_class_name}SchedulerOutput""" , re.sub("Bert" , snake_case__ , snake_case__ ) , )
# Copy consistency with overwrite
self.check_copy_consistency(
"# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test" , "TestSchedulerOutput" , snake_case__ , overwrite_result=re.sub("DDPM" , "Test" , snake_case__ ) , )
| 681 |
"""simple docstring"""
def a__ ( SCREAMING_SNAKE_CASE : int = 1_0 , SCREAMING_SNAKE_CASE : int = 2_2 ):
'''simple docstring'''
lowerCAmelCase : Dict = range(1 , SCREAMING_SNAKE_CASE )
lowerCAmelCase : List[str] = range(1 , SCREAMING_SNAKE_CASE )
return sum(
1 for power in powers for base in bases if len(str(base**power ) ) == power )
if __name__ == "__main__":
print(F"{solution(10, 22) = }")
| 681 | 1 |
"""simple docstring"""
import argparse
import json
from typing import List
from ltp import LTP
from transformers.models.bert.tokenization_bert import BertTokenizer
def a__ ( SCREAMING_SNAKE_CASE : Union[str, Any] ):
'''simple docstring'''
if (
(cp >= 0x4_E_0_0 and cp <= 0x9_F_F_F)
or (cp >= 0x3_4_0_0 and cp <= 0x4_D_B_F) #
or (cp >= 0x2_0_0_0_0 and cp <= 0x2_A_6_D_F) #
or (cp >= 0x2_A_7_0_0 and cp <= 0x2_B_7_3_F) #
or (cp >= 0x2_B_7_4_0 and cp <= 0x2_B_8_1_F) #
or (cp >= 0x2_B_8_2_0 and cp <= 0x2_C_E_A_F) #
or (cp >= 0xF_9_0_0 and cp <= 0xF_A_F_F)
or (cp >= 0x2_F_8_0_0 and cp <= 0x2_F_A_1_F) #
): #
return True
return False
def a__ ( SCREAMING_SNAKE_CASE : str ):
'''simple docstring'''
for char in word:
lowerCAmelCase : Optional[int] = ord(SCREAMING_SNAKE_CASE )
if not _is_chinese_char(SCREAMING_SNAKE_CASE ):
return 0
return 1
def a__ ( SCREAMING_SNAKE_CASE : List[str] ):
'''simple docstring'''
lowerCAmelCase : Any = set()
for token in tokens:
lowerCAmelCase : Union[str, Any] = len(SCREAMING_SNAKE_CASE ) > 1 and is_chinese(SCREAMING_SNAKE_CASE )
if chinese_word:
word_set.add(SCREAMING_SNAKE_CASE )
lowerCAmelCase : Union[str, Any] = list(SCREAMING_SNAKE_CASE )
return word_list
def a__ ( SCREAMING_SNAKE_CASE : List[str] , SCREAMING_SNAKE_CASE : set() ):
'''simple docstring'''
if not chinese_word_set:
return bert_tokens
lowerCAmelCase : str = max([len(SCREAMING_SNAKE_CASE ) for w in chinese_word_set] )
lowerCAmelCase : List[Any] = bert_tokens
lowerCAmelCase , lowerCAmelCase : List[Any] = 0, len(SCREAMING_SNAKE_CASE )
while start < end:
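        # Greedily try the longest candidate word at this position and mark its trailing subwords with "##".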
lowerCAmelCase : List[str] = True
if is_chinese(bert_word[start] ):
lowerCAmelCase : Dict = min(end - start , SCREAMING_SNAKE_CASE )
for i in range(SCREAMING_SNAKE_CASE , 1 , -1 ):
lowerCAmelCase : Union[str, Any] = "".join(bert_word[start : start + i] )
if whole_word in chinese_word_set:
for j in range(start + 1 , start + i ):
lowerCAmelCase : Union[str, Any] = "##" + bert_word[j]
lowerCAmelCase : Dict = start + i
lowerCAmelCase : str = False
break
if single_word:
start += 1
return bert_word
def a__ ( SCREAMING_SNAKE_CASE : List[str] , SCREAMING_SNAKE_CASE : LTP , SCREAMING_SNAKE_CASE : BertTokenizer ):
'''simple docstring'''
lowerCAmelCase : List[Any] = []
for i in range(0 , len(SCREAMING_SNAKE_CASE ) , 1_0_0 ):
lowerCAmelCase : str = ltp_tokenizer.pipeline(lines[i : i + 1_0_0] , tasks=["cws"] ).cws
lowerCAmelCase : List[Any] = [get_chinese_word(SCREAMING_SNAKE_CASE ) for r in res]
ltp_res.extend(SCREAMING_SNAKE_CASE )
assert len(SCREAMING_SNAKE_CASE ) == len(SCREAMING_SNAKE_CASE )
lowerCAmelCase : Optional[int] = []
for i in range(0 , len(SCREAMING_SNAKE_CASE ) , 1_0_0 ):
lowerCAmelCase : List[str] = bert_tokenizer(lines[i : i + 1_0_0] , add_special_tokens=SCREAMING_SNAKE_CASE , truncation=SCREAMING_SNAKE_CASE , max_length=5_1_2 )
bert_res.extend(res["input_ids"] )
assert len(SCREAMING_SNAKE_CASE ) == len(SCREAMING_SNAKE_CASE )
lowerCAmelCase : Optional[Any] = []
for input_ids, chinese_word in zip(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
lowerCAmelCase : Dict = []
for id in input_ids:
lowerCAmelCase : List[str] = bert_tokenizer._convert_id_to_token(SCREAMING_SNAKE_CASE )
input_tokens.append(SCREAMING_SNAKE_CASE )
lowerCAmelCase : Tuple = add_sub_symbol(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
lowerCAmelCase : Any = []
        # We only save the positions of Chinese subwords that start with "##", meaning they are part of a whole word.
for i, token in enumerate(SCREAMING_SNAKE_CASE ):
if token[:2] == "##":
lowerCAmelCase : Optional[Any] = token[2:]
# save chinese tokens' pos
if len(SCREAMING_SNAKE_CASE ) == 1 and _is_chinese_char(ord(SCREAMING_SNAKE_CASE ) ):
ref_id.append(SCREAMING_SNAKE_CASE )
ref_ids.append(SCREAMING_SNAKE_CASE )
assert len(SCREAMING_SNAKE_CASE ) == len(SCREAMING_SNAKE_CASE )
return ref_ids
def a__ ( SCREAMING_SNAKE_CASE : Dict ):
'''simple docstring'''
with open(args.file_name , "r" , encoding="utf-8" ) as f:
lowerCAmelCase : Optional[Any] = f.readlines()
lowerCAmelCase : Tuple = [line.strip() for line in data if len(SCREAMING_SNAKE_CASE ) > 0 and not line.isspace()] # avoid delimiter like '\u2029'
    lowerCAmelCase : Dict = LTP(args.ltp ) # faster on a GPU device
lowerCAmelCase : Tuple = BertTokenizer.from_pretrained(args.bert )
lowerCAmelCase : List[str] = prepare_ref(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
with open(args.save_path , "w" , encoding="utf-8" ) as f:
lowerCAmelCase : Dict = [json.dumps(SCREAMING_SNAKE_CASE ) + "\n" for ref in ref_ids]
f.writelines(SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
lowerCAmelCase__ = argparse.ArgumentParser(description='''prepare_chinese_ref''')
parser.add_argument(
'''--file_name''',
required=False,
type=str,
default='''./resources/chinese-demo.txt''',
        help='''file to process; same as the training data used for the LM''',
)
parser.add_argument(
'''--ltp''',
required=False,
type=str,
default='''./resources/ltp''',
help='''resources for LTP tokenizer, usually a path''',
)
parser.add_argument(
'''--bert''',
required=False,
type=str,
default='''./resources/robert''',
help='''resources for Bert tokenizer''',
)
parser.add_argument(
'''--save_path''',
required=False,
type=str,
default='''./resources/ref.txt''',
        help='''path to save the results''',
)
lowerCAmelCase__ = parser.parse_args()
main(args)
| 681 |
"""simple docstring"""
def a__ ( SCREAMING_SNAKE_CASE : List[str] ):
'''simple docstring'''
lowerCAmelCase : Optional[int] = len(SCREAMING_SNAKE_CASE )
while cur > 1:
# Find the maximum number in arr
lowerCAmelCase : List[str] = arr.index(max(arr[0:cur] ) )
# Reverse from 0 to mi
lowerCAmelCase : str = arr[mi::-1] + arr[mi + 1 : len(SCREAMING_SNAKE_CASE )]
# Reverse whole list
lowerCAmelCase : str = arr[cur - 1 :: -1] + arr[cur : len(SCREAMING_SNAKE_CASE )]
cur -= 1
return arr
if __name__ == "__main__":
lowerCAmelCase__ = input('''Enter numbers separated by a comma:\n''').strip()
lowerCAmelCase__ = [int(item) for item in user_input.split(''',''')]
print(pancake_sort(unsorted))
| 681 | 1 |
"""simple docstring"""
import copy
import random
from transformers import CLIPTokenizer
class SCREAMING_SNAKE_CASE__ ( lowercase ):
"""simple docstring"""
def __init__( self , *snake_case__ , **snake_case__ ):
"""simple docstring"""
super().__init__(*snake_case__ , **snake_case__ )
lowerCAmelCase : Tuple = {}
def lowercase__ ( self , snake_case__ , *snake_case__ , **snake_case__ ):
"""simple docstring"""
lowerCAmelCase : int = super().add_tokens(snake_case__ , *snake_case__ , **snake_case__ )
if num_added_tokens == 0:
raise ValueError(
f"""The tokenizer already contains the token {placeholder_token}. Please pass a different"""
" `placeholder_token` that is not already in the tokenizer." )
def lowercase__ ( self , snake_case__ , *snake_case__ , snake_case__=1 , **snake_case__ ):
"""simple docstring"""
lowerCAmelCase : Union[str, Any] = []
if num_vec_per_token == 1:
self.try_adding_tokens(snake_case__ , *snake_case__ , **snake_case__ )
output.append(snake_case__ )
else:
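            # Multi-vector case: register numbered variants placeholder_token_0 ... placeholder_token_{n-1}.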
lowerCAmelCase : Dict = []
for i in range(snake_case__ ):
lowerCAmelCase : int = placeholder_token + f"""_{i}"""
self.try_adding_tokens(snake_case__ , *snake_case__ , **snake_case__ )
output.append(snake_case__ )
# handle cases where there is a new placeholder token that contains the current placeholder token but is larger
for token in self.token_map:
if token in placeholder_token:
raise ValueError(
f"""The tokenizer already has placeholder token {token} that can get confused with"""
f""" {placeholder_token}keep placeholder tokens independent""" )
lowerCAmelCase : int = output
def lowercase__ ( self , snake_case__ , snake_case__=False , snake_case__=1.0 ):
"""simple docstring"""
if isinstance(snake_case__ , snake_case__ ):
lowerCAmelCase : Any = []
for i in range(len(snake_case__ ) ):
output.append(self.replace_placeholder_tokens_in_text(text[i] , vector_shuffle=snake_case__ ) )
return output
for placeholder_token in self.token_map:
if placeholder_token in text:
lowerCAmelCase : Union[str, Any] = self.token_map[placeholder_token]
lowerCAmelCase : Optional[Any] = tokens[: 1 + int(len(snake_case__ ) * prop_tokens_to_load )]
if vector_shuffle:
lowerCAmelCase : List[Any] = copy.copy(snake_case__ )
random.shuffle(snake_case__ )
lowerCAmelCase : Union[str, Any] = text.replace(snake_case__ , " ".join(snake_case__ ) )
return text
def __call__( self , snake_case__ , *snake_case__ , snake_case__=False , snake_case__=1.0 , **snake_case__ ):
"""simple docstring"""
return super().__call__(
self.replace_placeholder_tokens_in_text(
snake_case__ , vector_shuffle=snake_case__ , prop_tokens_to_load=snake_case__ ) , *snake_case__ , **snake_case__ , )
def lowercase__ ( self , snake_case__ , *snake_case__ , snake_case__=False , snake_case__=1.0 , **snake_case__ ):
"""simple docstring"""
return super().encode(
self.replace_placeholder_tokens_in_text(
snake_case__ , vector_shuffle=snake_case__ , prop_tokens_to_load=snake_case__ ) , *snake_case__ , **snake_case__ , )
| 681 |
"""simple docstring"""
from ..utils import is_flax_available, is_torch_available
if is_torch_available():
from .autoencoder_kl import AutoencoderKL
from .controlnet import ControlNetModel
from .dual_transformer_ad import DualTransformeraDModel
from .modeling_utils import ModelMixin
from .prior_transformer import PriorTransformer
from .ta_film_transformer import TaFilmDecoder
from .transformer_ad import TransformeraDModel
    from .unet_ad import UNetaDModel
    from .unet_ad_condition import UNetaDConditionModel
from .vq_model import VQModel
if is_flax_available():
from .controlnet_flax import FlaxControlNetModel
from .unet_ad_condition_flax import FlaxUNetaDConditionModel
from .vae_flax import FlaxAutoencoderKL
| 681 | 1 |
"""simple docstring"""
from __future__ import annotations
def a__ ( SCREAMING_SNAKE_CASE : tuple[int, int] , SCREAMING_SNAKE_CASE : int ):
'''simple docstring'''
lowerCAmelCase , lowerCAmelCase : List[Any] = position
lowerCAmelCase : Union[str, Any] = [
(y + 1, x + 2),
(y - 1, x + 2),
(y + 1, x - 2),
(y - 1, x - 2),
(y + 2, x + 1),
(y + 2, x - 1),
(y - 2, x + 1),
(y - 2, x - 1),
]
lowerCAmelCase : List[str] = []
for position in positions:
lowerCAmelCase , lowerCAmelCase : Union[str, Any] = position
if 0 <= y_test < n and 0 <= x_test < n:
permissible_positions.append(SCREAMING_SNAKE_CASE )
return permissible_positions
def a__ ( SCREAMING_SNAKE_CASE : list[list[int]] ):
'''simple docstring'''
return not any(elem == 0 for row in board for elem in row )
def a__ ( SCREAMING_SNAKE_CASE : list[list[int]] , SCREAMING_SNAKE_CASE : tuple[int, int] , SCREAMING_SNAKE_CASE : int ):
'''simple docstring'''
if is_complete(SCREAMING_SNAKE_CASE ):
return True
for position in get_valid_pos(SCREAMING_SNAKE_CASE , len(SCREAMING_SNAKE_CASE ) ):
lowerCAmelCase , lowerCAmelCase : Any = position
if board[y][x] == 0:
lowerCAmelCase : int = curr + 1
if open_knight_tour_helper(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , curr + 1 ):
return True
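            # Backtrack: reset this square when the move leads to no complete tour.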
lowerCAmelCase : Optional[Any] = 0
return False
def a__ ( SCREAMING_SNAKE_CASE : int ):
'''simple docstring'''
lowerCAmelCase : str = [[0 for i in range(SCREAMING_SNAKE_CASE )] for j in range(SCREAMING_SNAKE_CASE )]
for i in range(SCREAMING_SNAKE_CASE ):
for j in range(SCREAMING_SNAKE_CASE ):
lowerCAmelCase : List[str] = 1
if open_knight_tour_helper(SCREAMING_SNAKE_CASE , (i, j) , 1 ):
return board
lowerCAmelCase : Union[str, Any] = 0
    lowerCAmelCase : Optional[Any] = f"""Open Knight Tour cannot be performed on a board of size {n}"""
raise ValueError(SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 681 |
"""simple docstring"""
import math
def a__ ( SCREAMING_SNAKE_CASE : int ):
'''simple docstring'''
return math.sqrt(SCREAMING_SNAKE_CASE ) * math.sqrt(SCREAMING_SNAKE_CASE ) == num
def a__ ( SCREAMING_SNAKE_CASE : int ):
'''simple docstring'''
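    # Integer binary search for an exact square root; avoids the floating-point rounding of the sqrt-based check above.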
lowerCAmelCase : Dict = 0
lowerCAmelCase : List[str] = n
while left <= right:
lowerCAmelCase : str = (left + right) // 2
if mid**2 == n:
return True
elif mid**2 > n:
lowerCAmelCase : int = mid - 1
else:
lowerCAmelCase : int = mid + 1
return False
if __name__ == "__main__":
import doctest
doctest.testmod()
| 681 | 1 |
"""simple docstring"""
import json
import os
import sys
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from huggingface_hub import HfFolder, Repository, create_repo, delete_repo
from requests.exceptions import HTTPError
import transformers
from transformers import (
CONFIG_MAPPING,
FEATURE_EXTRACTOR_MAPPING,
PROCESSOR_MAPPING,
TOKENIZER_MAPPING,
AutoConfig,
AutoFeatureExtractor,
AutoProcessor,
AutoTokenizer,
BertTokenizer,
ProcessorMixin,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
)
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
from transformers.tokenization_utils import TOKENIZER_CONFIG_FILE
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_tokenizers_available
sys.path.append(str(Path(__file__).parent.parent.parent.parent / '''utils'''))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
from test_module.custom_processing import CustomProcessor # noqa E402
from test_module.custom_tokenization import CustomTokenizer # noqa E402
lowerCAmelCase__ = get_tests_dir('''fixtures/dummy_feature_extractor_config.json''')
lowerCAmelCase__ = get_tests_dir('''fixtures/vocab.json''')
lowerCAmelCase__ = get_tests_dir('''fixtures''')
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
"""simple docstring"""
a : Tuple =["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "bla", "blou"]
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : Any = 0
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : Union[str, Any] = AutoProcessor.from_pretrained("facebook/wav2vec2-base-960h" )
self.assertIsInstance(snake_case__ , snake_case__ )
def lowercase__ ( self ):
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdirname:
lowerCAmelCase : Tuple = WavaVecaConfig()
lowerCAmelCase : List[str] = AutoProcessor.from_pretrained("facebook/wav2vec2-base-960h" )
# save in new folder
model_config.save_pretrained(snake_case__ )
processor.save_pretrained(snake_case__ )
lowerCAmelCase : Optional[Any] = AutoProcessor.from_pretrained(snake_case__ )
self.assertIsInstance(snake_case__ , snake_case__ )
def lowercase__ ( self ):
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdirname:
# copy relevant files
copyfile(snake_case__ , os.path.join(snake_case__ , snake_case__ ) )
copyfile(snake_case__ , os.path.join(snake_case__ , "vocab.json" ) )
lowerCAmelCase : List[Any] = AutoProcessor.from_pretrained(snake_case__ )
self.assertIsInstance(snake_case__ , snake_case__ )
def lowercase__ ( self ):
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdirname:
lowerCAmelCase : Optional[int] = WavaVecaFeatureExtractor()
lowerCAmelCase : Tuple = AutoTokenizer.from_pretrained("facebook/wav2vec2-base-960h" )
lowerCAmelCase : Any = WavaVecaProcessor(snake_case__ , snake_case__ )
# save in new folder
processor.save_pretrained(snake_case__ )
# drop `processor_class` in tokenizer
with open(os.path.join(snake_case__ , snake_case__ ) , "r" ) as f:
lowerCAmelCase : Union[str, Any] = json.load(snake_case__ )
config_dict.pop("processor_class" )
with open(os.path.join(snake_case__ , snake_case__ ) , "w" ) as f:
f.write(json.dumps(snake_case__ ) )
lowerCAmelCase : Optional[Any] = AutoProcessor.from_pretrained(snake_case__ )
self.assertIsInstance(snake_case__ , snake_case__ )
def lowercase__ ( self ):
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdirname:
lowerCAmelCase : Dict = WavaVecaFeatureExtractor()
lowerCAmelCase : Dict = AutoTokenizer.from_pretrained("facebook/wav2vec2-base-960h" )
lowerCAmelCase : str = WavaVecaProcessor(snake_case__ , snake_case__ )
# save in new folder
processor.save_pretrained(snake_case__ )
# drop `processor_class` in feature extractor
with open(os.path.join(snake_case__ , snake_case__ ) , "r" ) as f:
lowerCAmelCase : Tuple = json.load(snake_case__ )
config_dict.pop("processor_class" )
with open(os.path.join(snake_case__ , snake_case__ ) , "w" ) as f:
f.write(json.dumps(snake_case__ ) )
lowerCAmelCase : List[str] = AutoProcessor.from_pretrained(snake_case__ )
self.assertIsInstance(snake_case__ , snake_case__ )
def lowercase__ ( self ):
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdirname:
lowerCAmelCase : Dict = WavaVecaConfig(processor_class="Wav2Vec2Processor" )
model_config.save_pretrained(snake_case__ )
# copy relevant files
copyfile(snake_case__ , os.path.join(snake_case__ , "vocab.json" ) )
            # create empty sample processor
with open(os.path.join(snake_case__ , snake_case__ ) , "w" ) as f:
f.write("{}" )
lowerCAmelCase : Union[str, Any] = AutoProcessor.from_pretrained(snake_case__ )
self.assertIsInstance(snake_case__ , snake_case__ )
def lowercase__ ( self ):
"""simple docstring"""
with self.assertRaises(snake_case__ ):
lowerCAmelCase : Dict = AutoProcessor.from_pretrained("hf-internal-testing/test_dynamic_processor" )
# If remote code is disabled, we can't load this config.
with self.assertRaises(snake_case__ ):
lowerCAmelCase : Any = AutoProcessor.from_pretrained(
"hf-internal-testing/test_dynamic_processor" , trust_remote_code=snake_case__ )
lowerCAmelCase : Any = AutoProcessor.from_pretrained("hf-internal-testing/test_dynamic_processor" , trust_remote_code=snake_case__ )
self.assertTrue(processor.special_attribute_present )
self.assertEqual(processor.__class__.__name__ , "NewProcessor" )
lowerCAmelCase : Optional[int] = processor.feature_extractor
self.assertTrue(feature_extractor.special_attribute_present )
self.assertEqual(feature_extractor.__class__.__name__ , "NewFeatureExtractor" )
lowerCAmelCase : Optional[Any] = processor.tokenizer
self.assertTrue(tokenizer.special_attribute_present )
if is_tokenizers_available():
self.assertEqual(tokenizer.__class__.__name__ , "NewTokenizerFast" )
# Test we can also load the slow version
lowerCAmelCase : Tuple = AutoProcessor.from_pretrained(
"hf-internal-testing/test_dynamic_processor" , trust_remote_code=snake_case__ , use_fast=snake_case__ )
lowerCAmelCase : str = new_processor.tokenizer
self.assertTrue(new_tokenizer.special_attribute_present )
self.assertEqual(new_tokenizer.__class__.__name__ , "NewTokenizer" )
else:
self.assertEqual(tokenizer.__class__.__name__ , "NewTokenizer" )
def lowercase__ ( self ):
"""simple docstring"""
try:
AutoConfig.register("custom" , snake_case__ )
AutoFeatureExtractor.register(snake_case__ , snake_case__ )
AutoTokenizer.register(snake_case__ , slow_tokenizer_class=snake_case__ )
AutoProcessor.register(snake_case__ , snake_case__ )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(snake_case__ ):
AutoProcessor.register(snake_case__ , snake_case__ )
# Now that the config is registered, it can be used as any other config with the auto-API
lowerCAmelCase : int = CustomFeatureExtractor.from_pretrained(snake_case__ )
with tempfile.TemporaryDirectory() as tmp_dir:
lowerCAmelCase : Optional[int] = os.path.join(snake_case__ , "vocab.txt" )
with open(snake_case__ , "w" , encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens] ) )
lowerCAmelCase : List[str] = CustomTokenizer(snake_case__ )
lowerCAmelCase : str = CustomProcessor(snake_case__ , snake_case__ )
with tempfile.TemporaryDirectory() as tmp_dir:
processor.save_pretrained(snake_case__ )
lowerCAmelCase : Optional[int] = AutoProcessor.from_pretrained(snake_case__ )
self.assertIsInstance(snake_case__ , snake_case__ )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
if CustomConfig in PROCESSOR_MAPPING._extra_content:
del PROCESSOR_MAPPING._extra_content[CustomConfig]
def lowercase__ ( self ):
"""simple docstring"""
class SCREAMING_SNAKE_CASE__ ( lowercase ):
"""simple docstring"""
a : List[str] =False
class SCREAMING_SNAKE_CASE__ ( lowercase ):
"""simple docstring"""
a : List[Any] =False
class SCREAMING_SNAKE_CASE__ ( lowercase ):
"""simple docstring"""
a : Optional[Any] ="AutoFeatureExtractor"
a : Dict ="AutoTokenizer"
a : Dict =False
try:
AutoConfig.register("custom" , snake_case__ )
AutoFeatureExtractor.register(snake_case__ , snake_case__ )
AutoTokenizer.register(snake_case__ , slow_tokenizer_class=snake_case__ )
AutoProcessor.register(snake_case__ , snake_case__ )
# If remote code is not set, the default is to use local classes.
lowerCAmelCase : Union[str, Any] = AutoProcessor.from_pretrained("hf-internal-testing/test_dynamic_processor" )
self.assertEqual(processor.__class__.__name__ , "NewProcessor" )
self.assertFalse(processor.special_attribute_present )
self.assertFalse(processor.feature_extractor.special_attribute_present )
self.assertFalse(processor.tokenizer.special_attribute_present )
# If remote code is disabled, we load the local ones.
lowerCAmelCase : int = AutoProcessor.from_pretrained(
"hf-internal-testing/test_dynamic_processor" , trust_remote_code=snake_case__ )
self.assertEqual(processor.__class__.__name__ , "NewProcessor" )
self.assertFalse(processor.special_attribute_present )
self.assertFalse(processor.feature_extractor.special_attribute_present )
self.assertFalse(processor.tokenizer.special_attribute_present )
# If remote is enabled, we load from the Hub.
lowerCAmelCase : Union[str, Any] = AutoProcessor.from_pretrained(
"hf-internal-testing/test_dynamic_processor" , trust_remote_code=snake_case__ )
self.assertEqual(processor.__class__.__name__ , "NewProcessor" )
self.assertTrue(processor.special_attribute_present )
self.assertTrue(processor.feature_extractor.special_attribute_present )
self.assertTrue(processor.tokenizer.special_attribute_present )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
if CustomConfig in PROCESSOR_MAPPING._extra_content:
del PROCESSOR_MAPPING._extra_content[CustomConfig]
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : Optional[int] = AutoProcessor.from_pretrained("hf-internal-testing/tiny-random-bert" )
self.assertEqual(processor.__class__.__name__ , "BertTokenizerFast" )
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : Optional[Any] = AutoProcessor.from_pretrained("hf-internal-testing/tiny-random-convnext" )
self.assertEqual(processor.__class__.__name__ , "ConvNextImageProcessor" )
@is_staging_test
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
"""simple docstring"""
a : Optional[Any] =["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "bla", "blou"]
@classmethod
def lowercase__ ( cls ):
"""simple docstring"""
lowerCAmelCase : Dict = TOKEN
HfFolder.save_token(snake_case__ )
@classmethod
def lowercase__ ( cls ):
"""simple docstring"""
try:
delete_repo(token=cls._token , repo_id="test-processor" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="valid_org/test-processor-org" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="test-dynamic-processor" )
except HTTPError:
pass
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : Union[str, Any] = WavaVecaProcessor.from_pretrained(snake_case__ )
with tempfile.TemporaryDirectory() as tmp_dir:
processor.save_pretrained(
os.path.join(snake_case__ , "test-processor" ) , push_to_hub=snake_case__ , use_auth_token=self._token )
lowerCAmelCase : Optional[int] = WavaVecaProcessor.from_pretrained(f"""{USER}/test-processor""" )
for k, v in processor.feature_extractor.__dict__.items():
self.assertEqual(snake_case__ , getattr(new_processor.feature_extractor , snake_case__ ) )
self.assertDictEqual(new_processor.tokenizer.get_vocab() , processor.tokenizer.get_vocab() )
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : Optional[Any] = WavaVecaProcessor.from_pretrained(snake_case__ )
with tempfile.TemporaryDirectory() as tmp_dir:
processor.save_pretrained(
os.path.join(snake_case__ , "test-processor-org" ) , push_to_hub=snake_case__ , use_auth_token=self._token , organization="valid_org" , )
lowerCAmelCase : List[Any] = WavaVecaProcessor.from_pretrained("valid_org/test-processor-org" )
for k, v in processor.feature_extractor.__dict__.items():
self.assertEqual(snake_case__ , getattr(new_processor.feature_extractor , snake_case__ ) )
self.assertDictEqual(new_processor.tokenizer.get_vocab() , processor.tokenizer.get_vocab() )
def lowercase__ ( self ):
"""simple docstring"""
CustomFeatureExtractor.register_for_auto_class()
CustomTokenizer.register_for_auto_class()
CustomProcessor.register_for_auto_class()
lowerCAmelCase : Optional[Any] = CustomFeatureExtractor.from_pretrained(snake_case__ )
with tempfile.TemporaryDirectory() as tmp_dir:
lowerCAmelCase : Optional[Any] = os.path.join(snake_case__ , "vocab.txt" )
with open(snake_case__ , "w" , encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens] ) )
lowerCAmelCase : Optional[int] = CustomTokenizer(snake_case__ )
lowerCAmelCase : List[str] = CustomProcessor(snake_case__ , snake_case__ )
with tempfile.TemporaryDirectory() as tmp_dir:
create_repo(f"""{USER}/test-dynamic-processor""" , token=self._token )
lowerCAmelCase : Optional[Any] = Repository(snake_case__ , clone_from=f"""{USER}/test-dynamic-processor""" , token=self._token )
processor.save_pretrained(snake_case__ )
# This has added the proper auto_map field to the feature extractor config
self.assertDictEqual(
processor.feature_extractor.auto_map , {
"AutoFeatureExtractor": "custom_feature_extraction.CustomFeatureExtractor",
"AutoProcessor": "custom_processing.CustomProcessor",
} , )
# This has added the proper auto_map field to the tokenizer config
with open(os.path.join(snake_case__ , "tokenizer_config.json" ) ) as f:
lowerCAmelCase : str = json.load(snake_case__ )
self.assertDictEqual(
tokenizer_config["auto_map"] , {
"AutoTokenizer": ["custom_tokenization.CustomTokenizer", None],
"AutoProcessor": "custom_processing.CustomProcessor",
} , )
# The code has been copied from fixtures
self.assertTrue(os.path.isfile(os.path.join(snake_case__ , "custom_feature_extraction.py" ) ) )
self.assertTrue(os.path.isfile(os.path.join(snake_case__ , "custom_tokenization.py" ) ) )
self.assertTrue(os.path.isfile(os.path.join(snake_case__ , "custom_processing.py" ) ) )
repo.push_to_hub()
lowerCAmelCase : Optional[int] = AutoProcessor.from_pretrained(f"""{USER}/test-dynamic-processor""" , trust_remote_code=snake_case__ )
# Can't make an isinstance check because the new_processor is from the CustomProcessor class of a dynamic module
self.assertEqual(new_processor.__class__.__name__ , "CustomProcessor" )
| 681 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {
'''google/vit-base-patch16-224''': '''https://huggingface.co/vit-base-patch16-224/resolve/main/config.json''',
# See all ViT models at https://huggingface.co/models?filter=vit
}
class SCREAMING_SNAKE_CASE__ ( lowercase ):
"""simple docstring"""
a : Union[str, Any] ="vit"
def __init__( self , snake_case__=768 , snake_case__=12 , snake_case__=12 , snake_case__=3_072 , snake_case__="gelu" , snake_case__=0.0 , snake_case__=0.0 , snake_case__=0.02 , snake_case__=1e-12 , snake_case__=224 , snake_case__=16 , snake_case__=3 , snake_case__=True , snake_case__=16 , **snake_case__ , ):
"""simple docstring"""
super().__init__(**snake_case__ )
lowerCAmelCase : Optional[Any] = hidden_size
lowerCAmelCase : List[Any] = num_hidden_layers
lowerCAmelCase : List[Any] = num_attention_heads
lowerCAmelCase : Union[str, Any] = intermediate_size
lowerCAmelCase : Any = hidden_act
lowerCAmelCase : Optional[Any] = hidden_dropout_prob
lowerCAmelCase : Optional[Any] = attention_probs_dropout_prob
lowerCAmelCase : List[str] = initializer_range
lowerCAmelCase : Optional[Any] = layer_norm_eps
lowerCAmelCase : Optional[int] = image_size
lowerCAmelCase : Optional[Any] = patch_size
lowerCAmelCase : Tuple = num_channels
lowerCAmelCase : Optional[int] = qkv_bias
lowerCAmelCase : str = encoder_stride
class SCREAMING_SNAKE_CASE__ ( lowercase ):
"""simple docstring"""
a : List[Any] =version.parse("1.11" )
@property
def lowercase__ ( self ):
"""simple docstring"""
return OrderedDict(
[
("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
] )
@property
def lowercase__ ( self ):
"""simple docstring"""
return 1e-4
| 681 | 1 |
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, XLMRobertaTokenizer
from diffusers import AltDiffusionPipeline, AutoencoderKL, DDIMScheduler, PNDMScheduler, UNetaDConditionModel
from diffusers.pipelines.alt_diffusion.modeling_roberta_series import (
RobertaSeriesConfig,
RobertaSeriesModelWithTransformation,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class SCREAMING_SNAKE_CASE__ ( lowercase , lowercase , lowercase , unittest.TestCase ):
"""simple docstring"""
a : List[Any] =AltDiffusionPipeline
a : int =TEXT_TO_IMAGE_PARAMS
a : int =TEXT_TO_IMAGE_BATCH_PARAMS
a : Union[str, Any] =TEXT_TO_IMAGE_IMAGE_PARAMS
a : Dict =TEXT_TO_IMAGE_IMAGE_PARAMS
def lowercase__ ( self ):
"""simple docstring"""
torch.manual_seed(0 )
lowerCAmelCase : int = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=32 , )
lowerCAmelCase : Optional[Any] = DDIMScheduler(
beta_start=0.00085 , beta_end=0.012 , beta_schedule="scaled_linear" , clip_sample=snake_case__ , set_alpha_to_one=snake_case__ , )
torch.manual_seed(0 )
lowerCAmelCase : int = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , )
# TODO: address the non-deterministic text encoder (fails for save-load tests)
# torch.manual_seed(0)
# text_encoder_config = RobertaSeriesConfig(
# hidden_size=32,
# project_dim=32,
# intermediate_size=37,
# layer_norm_eps=1e-05,
# num_attention_heads=4,
# num_hidden_layers=5,
# vocab_size=5002,
# )
# text_encoder = RobertaSeriesModelWithTransformation(text_encoder_config)
torch.manual_seed(0 )
lowerCAmelCase : Optional[Any] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , projection_dim=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=5_002 , )
lowerCAmelCase : List[str] = CLIPTextModel(snake_case__ )
lowerCAmelCase : List[Any] = XLMRobertaTokenizer.from_pretrained("hf-internal-testing/tiny-xlm-roberta" )
lowerCAmelCase : Optional[Any] = 77
lowerCAmelCase : List[Any] = {
"unet": unet,
"scheduler": scheduler,
"vae": vae,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"safety_checker": None,
"feature_extractor": None,
}
return components
def lowercase__ ( self , snake_case__ , snake_case__=0 ):
"""simple docstring"""
if str(snake_case__ ).startswith("mps" ):
lowerCAmelCase : int = torch.manual_seed(snake_case__ )
else:
lowerCAmelCase : Dict = torch.Generator(device=snake_case__ ).manual_seed(snake_case__ )
lowerCAmelCase : Optional[int] = {
"prompt": "A painting of a squirrel eating a burger",
"generator": generator,
"num_inference_steps": 2,
"guidance_scale": 6.0,
"output_type": "numpy",
}
return inputs
def lowercase__ ( self ):
"""simple docstring"""
super().test_attention_slicing_forward_pass(expected_max_diff=3e-3 )
def lowercase__ ( self ):
"""simple docstring"""
super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : List[Any] = "cpu" # ensure determinism for the device-dependent torch.Generator
lowerCAmelCase : List[str] = self.get_dummy_components()
torch.manual_seed(0 )
lowerCAmelCase : Union[str, Any] = RobertaSeriesConfig(
hidden_size=32 , project_dim=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=5_002 , )
# TODO: remove after fixing the non-deterministic text encoder
lowerCAmelCase : int = RobertaSeriesModelWithTransformation(snake_case__ )
lowerCAmelCase : Optional[int] = text_encoder
lowerCAmelCase : str = AltDiffusionPipeline(**snake_case__ )
lowerCAmelCase : Any = alt_pipe.to(snake_case__ )
alt_pipe.set_progress_bar_config(disable=snake_case__ )
lowerCAmelCase : str = self.get_dummy_inputs(snake_case__ )
lowerCAmelCase : Tuple = "A photo of an astronaut"
lowerCAmelCase : List[Any] = alt_pipe(**snake_case__ )
lowerCAmelCase : Union[str, Any] = output.images
lowerCAmelCase : List[str] = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
lowerCAmelCase : int = np.array(
[0.5748162, 0.60447145, 0.48821217, 0.50100636, 0.5431185, 0.45763683, 0.49657696, 0.48132733, 0.47573093] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : str = "cpu" # ensure determinism for the device-dependent torch.Generator
lowerCAmelCase : List[Any] = self.get_dummy_components()
lowerCAmelCase : int = PNDMScheduler(skip_prk_steps=snake_case__ )
torch.manual_seed(0 )
lowerCAmelCase : Optional[Any] = RobertaSeriesConfig(
hidden_size=32 , project_dim=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=5_002 , )
# TODO: remove after fixing the non-deterministic text encoder
lowerCAmelCase : Union[str, Any] = RobertaSeriesModelWithTransformation(snake_case__ )
lowerCAmelCase : Union[str, Any] = text_encoder
lowerCAmelCase : Any = AltDiffusionPipeline(**snake_case__ )
lowerCAmelCase : Any = alt_pipe.to(snake_case__ )
alt_pipe.set_progress_bar_config(disable=snake_case__ )
lowerCAmelCase : Any = self.get_dummy_inputs(snake_case__ )
lowerCAmelCase : List[str] = alt_pipe(**snake_case__ )
lowerCAmelCase : Optional[int] = output.images
lowerCAmelCase : List[str] = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
lowerCAmelCase : Union[str, Any] = np.array(
[0.51605093, 0.5707241, 0.47365507, 0.50578886, 0.5633877, 0.4642503, 0.5182081, 0.48763484, 0.49084237] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
@slow
@require_torch_gpu
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
"""simple docstring"""
def lowercase__ ( self ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : Union[str, Any] = AltDiffusionPipeline.from_pretrained("BAAI/AltDiffusion" , safety_checker=snake_case__ )
lowerCAmelCase : Union[str, Any] = alt_pipe.to(snake_case__ )
alt_pipe.set_progress_bar_config(disable=snake_case__ )
lowerCAmelCase : Tuple = "A painting of a squirrel eating a burger"
lowerCAmelCase : Optional[int] = torch.manual_seed(0 )
lowerCAmelCase : str = alt_pipe([prompt] , generator=snake_case__ , guidance_scale=6.0 , num_inference_steps=20 , output_type="np" )
lowerCAmelCase : Union[str, Any] = output.images
lowerCAmelCase : List[str] = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
lowerCAmelCase : str = np.array([0.1010, 0.0800, 0.0794, 0.0885, 0.0843, 0.0762, 0.0769, 0.0729, 0.0586] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : int = DDIMScheduler.from_pretrained("BAAI/AltDiffusion" , subfolder="scheduler" )
lowerCAmelCase : Tuple = AltDiffusionPipeline.from_pretrained("BAAI/AltDiffusion" , scheduler=snake_case__ , safety_checker=snake_case__ )
lowerCAmelCase : Any = alt_pipe.to(snake_case__ )
alt_pipe.set_progress_bar_config(disable=snake_case__ )
lowerCAmelCase : List[str] = "A painting of a squirrel eating a burger"
lowerCAmelCase : Dict = torch.manual_seed(0 )
lowerCAmelCase : Tuple = alt_pipe([prompt] , generator=snake_case__ , num_inference_steps=2 , output_type="numpy" )
lowerCAmelCase : Tuple = output.images
lowerCAmelCase : str = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
lowerCAmelCase : Optional[Any] = np.array([0.4019, 0.4052, 0.3810, 0.4119, 0.3916, 0.3982, 0.4651, 0.4195, 0.5323] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
| 681 |
"""simple docstring"""
from .glue import glue_convert_examples_to_features, glue_output_modes, glue_processors, glue_tasks_num_labels
from .squad import SquadExample, SquadFeatures, SquadVaProcessor, squad_convert_examples_to_features
from .utils import DataProcessor, InputExample, InputFeatures, SingleSentenceClassificationProcessor
from .xnli import xnli_output_modes, xnli_processors, xnli_tasks_num_labels
| 681 | 1 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
lowerCAmelCase__ = {
'''google/tapas-base-finetuned-sqa''': (
'''https://huggingface.co/google/tapas-base-finetuned-sqa/resolve/main/config.json'''
),
'''google/tapas-base-finetuned-wtq''': (
'''https://huggingface.co/google/tapas-base-finetuned-wtq/resolve/main/config.json'''
),
'''google/tapas-base-finetuned-wikisql-supervised''': (
'''https://huggingface.co/google/tapas-base-finetuned-wikisql-supervised/resolve/main/config.json'''
),
'''google/tapas-base-finetuned-tabfact''': (
'''https://huggingface.co/google/tapas-base-finetuned-tabfact/resolve/main/config.json'''
),
}
class SCREAMING_SNAKE_CASE__ ( lowercase ):
"""simple docstring"""
a : Optional[int] ="tapas"
def __init__( self , snake_case__=30_522 , snake_case__=768 , snake_case__=12 , snake_case__=12 , snake_case__=3_072 , snake_case__="gelu" , snake_case__=0.1 , snake_case__=0.1 , snake_case__=1_024 , snake_case__=[3, 256, 256, 2, 256, 256, 10] , snake_case__=0.02 , snake_case__=1e-12 , snake_case__=0 , snake_case__=10.0 , snake_case__=0 , snake_case__=1.0 , snake_case__=None , snake_case__=1.0 , snake_case__=False , snake_case__=None , snake_case__=1.0 , snake_case__=1.0 , snake_case__=False , snake_case__=False , snake_case__="ratio" , snake_case__=None , snake_case__=None , snake_case__=64 , snake_case__=32 , snake_case__=False , snake_case__=True , snake_case__=False , snake_case__=False , snake_case__=True , snake_case__=False , snake_case__=None , snake_case__=None , **snake_case__ , ):
"""simple docstring"""
super().__init__(pad_token_id=snake_case__ , **snake_case__ )
# BERT hyperparameters (with updated max_position_embeddings and type_vocab_sizes)
lowerCAmelCase : Union[str, Any] = vocab_size
lowerCAmelCase : Optional[int] = hidden_size
lowerCAmelCase : Any = num_hidden_layers
lowerCAmelCase : str = num_attention_heads
lowerCAmelCase : Optional[int] = hidden_act
lowerCAmelCase : Dict = intermediate_size
lowerCAmelCase : Tuple = hidden_dropout_prob
lowerCAmelCase : Any = attention_probs_dropout_prob
lowerCAmelCase : Any = max_position_embeddings
lowerCAmelCase : List[Any] = type_vocab_sizes
lowerCAmelCase : Union[str, Any] = initializer_range
lowerCAmelCase : Optional[Any] = layer_norm_eps
# Fine-tuning task hyperparameters
lowerCAmelCase : Union[str, Any] = positive_label_weight
lowerCAmelCase : Tuple = num_aggregation_labels
lowerCAmelCase : Union[str, Any] = aggregation_loss_weight
lowerCAmelCase : Optional[int] = use_answer_as_supervision
lowerCAmelCase : List[str] = answer_loss_importance
lowerCAmelCase : List[Any] = use_normalized_answer_loss
lowerCAmelCase : Optional[int] = huber_loss_delta
lowerCAmelCase : Union[str, Any] = temperature
lowerCAmelCase : Dict = aggregation_temperature
lowerCAmelCase : Optional[Any] = use_gumbel_for_cells
lowerCAmelCase : Union[str, Any] = use_gumbel_for_aggregation
lowerCAmelCase : Optional[Any] = average_approximation_function
lowerCAmelCase : Optional[Any] = cell_selection_preference
lowerCAmelCase : int = answer_loss_cutoff
lowerCAmelCase : int = max_num_rows
lowerCAmelCase : str = max_num_columns
lowerCAmelCase : Union[str, Any] = average_logits_per_cell
lowerCAmelCase : Any = select_one_column
lowerCAmelCase : Optional[Any] = allow_empty_column_selection
lowerCAmelCase : Tuple = init_cell_selection_weights_to_zero
lowerCAmelCase : str = reset_position_index_per_cell
lowerCAmelCase : str = disable_per_token_loss
# Aggregation hyperparameters
lowerCAmelCase : Dict = aggregation_labels
lowerCAmelCase : Optional[Any] = no_aggregation_label_index
if isinstance(self.aggregation_labels , snake_case__ ):
lowerCAmelCase : str = {int(snake_case__ ): v for k, v in aggregation_labels.items()}
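        # Illustrative (not from this file): the WTQ fine-tuning setup described in the
        # upstream TAPAS docs uses num_aggregation_labels=4 with
        # aggregation_labels={0: "NONE", 1: "SUM", 2: "AVERAGE", 3: "COUNT"}; the
        # conversion above restores int keys after a JSON round-trip turns them into strings.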
| 681 |
"""simple docstring"""
import copy
import inspect
import unittest
from transformers import AutoBackbone
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import require_timm, require_torch, torch_device
from transformers.utils.import_utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
if is_torch_available():
import torch
from transformers import TimmBackbone, TimmBackboneConfig
from ...test_pipeline_mixin import PipelineTesterMixin
class SCREAMING_SNAKE_CASE__ :
"""simple docstring"""
def __init__( self , snake_case__ , snake_case__=None , snake_case__=None , snake_case__=None , snake_case__="resnet50" , snake_case__=3 , snake_case__=32 , snake_case__=3 , snake_case__=True , snake_case__=True , ):
"""simple docstring"""
lowerCAmelCase : List[str] = parent
lowerCAmelCase : Union[str, Any] = out_indices if out_indices is not None else [4]
lowerCAmelCase : Tuple = stage_names
lowerCAmelCase : Any = out_features
lowerCAmelCase : Any = backbone
lowerCAmelCase : Union[str, Any] = batch_size
lowerCAmelCase : Optional[int] = image_size
lowerCAmelCase : List[str] = num_channels
lowerCAmelCase : int = use_pretrained_backbone
lowerCAmelCase : Tuple = is_training
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : Dict = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowerCAmelCase : Optional[int] = self.get_config()
return config, pixel_values
def lowercase__ ( self ):
"""simple docstring"""
return TimmBackboneConfig(
image_size=self.image_size , num_channels=self.num_channels , out_features=self.out_features , out_indices=self.out_indices , stage_names=self.stage_names , use_pretrained_backbone=self.use_pretrained_backbone , backbone=self.backbone , )
def lowercase__ ( self , snake_case__ , snake_case__ ):
"""simple docstring"""
lowerCAmelCase : List[Any] = TimmBackbone(config=snake_case__ )
model.to(snake_case__ )
model.eval()
with torch.no_grad():
lowerCAmelCase : Union[str, Any] = model(snake_case__ )
self.parent.assertEqual(
            result.feature_maps[-1].shape , (self.batch_size, model.channels[-1], 14, 14) , )
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : Union[str, Any] = self.prepare_config_and_inputs()
lowerCAmelCase , lowerCAmelCase : Tuple = config_and_inputs
lowerCAmelCase : str = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
@require_timm
class SCREAMING_SNAKE_CASE__ ( lowercase , lowercase , lowercase , unittest.TestCase ):
"""simple docstring"""
a : Optional[int] =(TimmBackbone,) if is_torch_available() else ()
a : Union[str, Any] ={"feature-extraction": TimmBackbone} if is_torch_available() else {}
a : Tuple =False
a : List[Any] =False
a : Optional[Any] =False
a : Dict =False
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : Optional[Any] = TimmBackboneModelTester(self )
lowerCAmelCase : List[str] = ConfigTester(self , config_class=snake_case__ , has_text_modality=snake_case__ )
def lowercase__ ( self ):
"""simple docstring"""
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : str = "resnet18"
lowerCAmelCase : str = "microsoft/resnet-18"
lowerCAmelCase : List[Any] = AutoBackbone.from_pretrained(snake_case__ , use_timm_backbone=snake_case__ )
lowerCAmelCase : List[str] = AutoBackbone.from_pretrained(snake_case__ )
self.assertEqual(len(timm_model.out_features ) , len(transformers_model.out_features ) )
self.assertEqual(len(timm_model.stage_names ) , len(transformers_model.stage_names ) )
self.assertEqual(timm_model.channels , transformers_model.channels )
# Out indices are set to the last layer by default. For timm models, we don't know
# the number of layers in advance, so we set it to (-1,), whereas for transformers
# models, we set it to [len(stage_names) - 1] (kept for backward compatibility).
self.assertEqual(timm_model.out_indices , (-1,) )
self.assertEqual(transformers_model.out_indices , [len(timm_model.stage_names ) - 1] )
lowerCAmelCase : Union[str, Any] = AutoBackbone.from_pretrained(snake_case__ , use_timm_backbone=snake_case__ , out_indices=[1, 2, 3] )
lowerCAmelCase : List[Any] = AutoBackbone.from_pretrained(snake_case__ , out_indices=[1, 2, 3] )
self.assertEqual(timm_model.out_indices , transformers_model.out_indices )
self.assertEqual(len(timm_model.out_features ) , len(transformers_model.out_features ) )
self.assertEqual(timm_model.channels , transformers_model.channels )
@unittest.skip("TimmBackbone doesn't support feed forward chunking" )
def lowercase__ ( self ):
"""simple docstring"""
pass
@unittest.skip("TimmBackbone doesn't have num_hidden_layers attribute" )
def lowercase__ ( self ):
"""simple docstring"""
pass
@unittest.skip("TimmBackbone initialization is managed on the timm side" )
def lowercase__ ( self ):
"""simple docstring"""
pass
@unittest.skip("TimmBackbone models doesn't have inputs_embeds" )
def lowercase__ ( self ):
"""simple docstring"""
pass
@unittest.skip("TimmBackbone models doesn't have inputs_embeds" )
def lowercase__ ( self ):
"""simple docstring"""
pass
@unittest.skip("TimmBackbone model cannot be created without specifying a backbone checkpoint" )
def lowercase__ ( self ):
"""simple docstring"""
pass
@unittest.skip("Only checkpoints on timm can be loaded into TimmBackbone" )
def lowercase__ ( self ):
"""simple docstring"""
pass
@unittest.skip("model weights aren't tied in TimmBackbone." )
def lowercase__ ( self ):
"""simple docstring"""
pass
@unittest.skip("model weights aren't tied in TimmBackbone." )
def lowercase__ ( self ):
"""simple docstring"""
pass
@unittest.skip("Only checkpoints on timm can be loaded into TimmBackbone" )
def lowercase__ ( self ):
"""simple docstring"""
pass
@unittest.skip("Only checkpoints on timm can be loaded into TimmBackbone" )
def lowercase__ ( self ):
"""simple docstring"""
pass
@unittest.skip("TimmBackbone doesn't have hidden size info in its configuration." )
def lowercase__ ( self ):
"""simple docstring"""
pass
@unittest.skip("TimmBackbone doesn't support output_attentions." )
def lowercase__ ( self ):
"""simple docstring"""
pass
@unittest.skip("Safetensors is not supported by timm." )
def lowercase__ ( self ):
"""simple docstring"""
pass
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
def lowercase__ ( self ):
"""simple docstring"""
pass
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase , lowerCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCAmelCase : Dict = model_class(snake_case__ )
lowerCAmelCase : Any = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCAmelCase : Optional[int] = [*signature.parameters.keys()]
lowerCAmelCase : Any = ["pixel_values"]
self.assertListEqual(arg_names[:1] , snake_case__ )
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase , lowerCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
lowerCAmelCase : int = True
lowerCAmelCase : str = self.has_attentions
# no need to test all models as different heads yield the same functionality
lowerCAmelCase : Optional[int] = self.all_model_classes[0]
lowerCAmelCase : Union[str, Any] = model_class(snake_case__ )
model.to(snake_case__ )
lowerCAmelCase : Optional[int] = self._prepare_for_class(snake_case__ , snake_case__ )
lowerCAmelCase : Dict = model(**snake_case__ )
lowerCAmelCase : Tuple = outputs[0][-1]
# Encoder-/Decoder-only models
lowerCAmelCase : Optional[int] = outputs.hidden_states[0]
hidden_states.retain_grad()
if self.has_attentions:
lowerCAmelCase : Union[str, Any] = outputs.attentions[0]
attentions.retain_grad()
output.flatten()[0].backward(retain_graph=snake_case__ )
self.assertIsNotNone(hidden_states.grad )
if self.has_attentions:
self.assertIsNotNone(attentions.grad )
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase , lowerCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCAmelCase : Dict = model_class(snake_case__ )
model.to(snake_case__ )
model.eval()
lowerCAmelCase : List[str] = model(**snake_case__ )
self.assertEqual(len(result.feature_maps ) , len(config.out_indices ) )
self.assertEqual(len(model.channels ) , len(config.out_indices ) )
# Check output of last stage is taken if out_features=None, out_indices=None
lowerCAmelCase : Dict = copy.deepcopy(snake_case__ )
lowerCAmelCase : Optional[int] = None
lowerCAmelCase : Dict = model_class(snake_case__ )
model.to(snake_case__ )
model.eval()
lowerCAmelCase : Optional[int] = model(**snake_case__ )
self.assertEqual(len(result.feature_maps ) , 1 )
self.assertEqual(len(model.channels ) , 1 )
# Check backbone can be initialized with fresh weights
lowerCAmelCase : Optional[int] = copy.deepcopy(snake_case__ )
lowerCAmelCase : List[str] = False
lowerCAmelCase : int = model_class(snake_case__ )
model.to(snake_case__ )
model.eval()
lowerCAmelCase : Optional[Any] = model(**snake_case__ )
| 681 | 1 |
"""simple docstring"""
import warnings
from typing import List
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import is_flax_available, is_tf_available, is_torch_available
class SCREAMING_SNAKE_CASE__ ( lowercase ):
"""simple docstring"""
a : int =["image_processor", "tokenizer"]
a : int ="OwlViTImageProcessor"
a : List[Any] =("CLIPTokenizer", "CLIPTokenizerFast")
def __init__( self , snake_case__=None , snake_case__=None , **snake_case__ ):
"""simple docstring"""
lowerCAmelCase : int = None
if "feature_extractor" in kwargs:
warnings.warn(
"The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
" instead." , snake_case__ , )
lowerCAmelCase : Dict = kwargs.pop("feature_extractor" )
lowerCAmelCase : Optional[Any] = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("You need to specify an `image_processor`." )
if tokenizer is None:
raise ValueError("You need to specify a `tokenizer`." )
super().__init__(snake_case__ , snake_case__ )
def __call__( self , snake_case__=None , snake_case__=None , snake_case__=None , snake_case__="max_length" , snake_case__="np" , **snake_case__ ):
"""simple docstring"""
if text is None and query_images is None and images is None:
raise ValueError(
"You have to specify at least one text or query image or image. All three cannot be none." )
if text is not None:
if isinstance(snake_case__ , snake_case__ ) or (isinstance(snake_case__ , snake_case__ ) and not isinstance(text[0] , snake_case__ )):
lowerCAmelCase : Any = [self.tokenizer(snake_case__ , padding=snake_case__ , return_tensors=snake_case__ , **snake_case__ )]
elif isinstance(snake_case__ , snake_case__ ) and isinstance(text[0] , snake_case__ ):
lowerCAmelCase : int = []
# Maximum number of queries across batch
lowerCAmelCase : Tuple = max([len(snake_case__ ) for t in text] )
# Pad all batch samples to max number of text queries
for t in text:
if len(snake_case__ ) != max_num_queries:
lowerCAmelCase : str = t + [" "] * (max_num_queries - len(snake_case__ ))
lowerCAmelCase : int = self.tokenizer(snake_case__ , padding=snake_case__ , return_tensors=snake_case__ , **snake_case__ )
encodings.append(snake_case__ )
else:
raise TypeError("Input text should be a string, a list of strings or a nested list of strings" )
if return_tensors == "np":
lowerCAmelCase : Any = np.concatenate([encoding["input_ids"] for encoding in encodings] , axis=0 )
lowerCAmelCase : Any = np.concatenate([encoding["attention_mask"] for encoding in encodings] , axis=0 )
elif return_tensors == "jax" and is_flax_available():
import jax.numpy as jnp
lowerCAmelCase : Optional[int] = jnp.concatenate([encoding["input_ids"] for encoding in encodings] , axis=0 )
lowerCAmelCase : Tuple = jnp.concatenate([encoding["attention_mask"] for encoding in encodings] , axis=0 )
elif return_tensors == "pt" and is_torch_available():
import torch
lowerCAmelCase : Union[str, Any] = torch.cat([encoding["input_ids"] for encoding in encodings] , dim=0 )
lowerCAmelCase : str = torch.cat([encoding["attention_mask"] for encoding in encodings] , dim=0 )
elif return_tensors == "tf" and is_tf_available():
import tensorflow as tf
lowerCAmelCase : Tuple = tf.stack([encoding["input_ids"] for encoding in encodings] , axis=0 )
lowerCAmelCase : str = tf.stack([encoding["attention_mask"] for encoding in encodings] , axis=0 )
else:
raise ValueError("Target return tensor type could not be returned" )
lowerCAmelCase : Union[str, Any] = BatchEncoding()
lowerCAmelCase : str = input_ids
lowerCAmelCase : Any = attention_mask
if query_images is not None:
lowerCAmelCase : Tuple = BatchEncoding()
lowerCAmelCase : Dict = self.image_processor(
snake_case__ , return_tensors=snake_case__ , **snake_case__ ).pixel_values
lowerCAmelCase : Union[str, Any] = query_pixel_values
if images is not None:
lowerCAmelCase : Optional[int] = self.image_processor(snake_case__ , return_tensors=snake_case__ , **snake_case__ )
if text is not None and images is not None:
lowerCAmelCase : Optional[Any] = image_features.pixel_values
return encoding
elif query_images is not None and images is not None:
lowerCAmelCase : Any = image_features.pixel_values
return encoding
elif text is not None or query_images is not None:
return encoding
else:
return BatchEncoding(data=dict(**snake_case__ ) , tensor_type=snake_case__ )
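    # Illustrative usage sketch (the checkpoint name comes from the upstream OWL-ViT
    # docs; not executed here):
    #   processor = OwlViTProcessor.from_pretrained("google/owlvit-base-patch32")
    #   inputs = processor(text=[["a photo of a cat", "a photo of a dog"]],
    #                      images=image, return_tensors="pt")
    # Each sample is padded to the batch-wide maximum number of text queries before
    # tokenization, as implemented in __call__ above.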
def lowercase__ ( self , *snake_case__ , **snake_case__ ):
"""simple docstring"""
return self.image_processor.post_process(*snake_case__ , **snake_case__ )
def lowercase__ ( self , *snake_case__ , **snake_case__ ):
"""simple docstring"""
return self.image_processor.post_process_object_detection(*snake_case__ , **snake_case__ )
def lowercase__ ( self , *snake_case__ , **snake_case__ ):
"""simple docstring"""
return self.image_processor.post_process_image_guided_detection(*snake_case__ , **snake_case__ )
def lowercase__ ( self , *snake_case__ , **snake_case__ ):
"""simple docstring"""
return self.tokenizer.batch_decode(*snake_case__ , **snake_case__ )
def lowercase__ ( self , *snake_case__ , **snake_case__ ):
"""simple docstring"""
return self.tokenizer.decode(*snake_case__ , **snake_case__ )
@property
def lowercase__ ( self ):
"""simple docstring"""
warnings.warn(
"`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead." , snake_case__ , )
return self.image_processor_class
@property
def lowercase__ ( self ):
"""simple docstring"""
warnings.warn(
"`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead." , snake_case__ , )
return self.image_processor
| 681 |
"""simple docstring"""
import argparse
import os
import re
lowerCAmelCase__ = '''src/transformers'''
# Pattern that looks at the indentation in a line.
lowerCAmelCase__ = re.compile(r'''^(\s*)\S''')
# Pattern that matches `"key":" and puts `key` in group 0.
lowerCAmelCase__ = re.compile(r'''^\s*"([^"]+)":''')
# Pattern that matches `_import_structure["key"]` and puts `key` in group 0.
lowerCAmelCase__ = re.compile(r'''^\s*_import_structure\["([^"]+)"\]''')
# Pattern that matches `"key",` and puts `key` in group 0.
lowerCAmelCase__ = re.compile(r'''^\s*"([^"]+)",\s*$''')
# Pattern that matches any `[stuff]` and puts `stuff` in group 0.
lowerCAmelCase__ = re.compile(r'''\[([^\]]+)\]''')
def a__ ( SCREAMING_SNAKE_CASE : List[str] ):
'''simple docstring'''
lowerCAmelCase : Union[str, Any] = _re_indent.search(SCREAMING_SNAKE_CASE )
return "" if search is None else search.groups()[0]
def a__ ( SCREAMING_SNAKE_CASE : List[Any] , SCREAMING_SNAKE_CASE : Optional[int]="" , SCREAMING_SNAKE_CASE : Optional[int]=None , SCREAMING_SNAKE_CASE : Dict=None ):
'''simple docstring'''
lowerCAmelCase : Tuple = 0
lowerCAmelCase : Optional[int] = code.split("\n" )
if start_prompt is not None:
while not lines[index].startswith(SCREAMING_SNAKE_CASE ):
index += 1
lowerCAmelCase : Dict = ["\n".join(lines[:index] )]
else:
lowerCAmelCase : Any = []
# We split into blocks until we get to the `end_prompt` (or the end of the block).
lowerCAmelCase : Union[str, Any] = [lines[index]]
index += 1
while index < len(SCREAMING_SNAKE_CASE ) and (end_prompt is None or not lines[index].startswith(SCREAMING_SNAKE_CASE )):
if len(lines[index] ) > 0 and get_indent(lines[index] ) == indent_level:
if len(SCREAMING_SNAKE_CASE ) > 0 and get_indent(current_block[-1] ).startswith(indent_level + " " ):
current_block.append(lines[index] )
blocks.append("\n".join(SCREAMING_SNAKE_CASE ) )
if index < len(SCREAMING_SNAKE_CASE ) - 1:
lowerCAmelCase : List[str] = [lines[index + 1]]
index += 1
else:
lowerCAmelCase : Optional[Any] = []
else:
blocks.append("\n".join(SCREAMING_SNAKE_CASE ) )
lowerCAmelCase : str = [lines[index]]
else:
current_block.append(lines[index] )
index += 1
# Adds current block if it's nonempty.
if len(SCREAMING_SNAKE_CASE ) > 0:
blocks.append("\n".join(SCREAMING_SNAKE_CASE ) )
# Add final block after end_prompt if provided.
if end_prompt is not None and index < len(SCREAMING_SNAKE_CASE ):
blocks.append("\n".join(lines[index:] ) )
return blocks
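# Illustrative (not part of the original script): run on a transformers
# __init__.py with start_prompt="_import_structure = {" and
# end_prompt="if TYPE_CHECKING:", the helper above yields the preamble, one
# block per indent-level-0 section of the import structure, and the
# TYPE_CHECKING tail as the final block.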
def a__ ( SCREAMING_SNAKE_CASE : int ):
'''simple docstring'''
def _inner(SCREAMING_SNAKE_CASE : Optional[Any] ):
return key(SCREAMING_SNAKE_CASE ).lower().replace("_" , "" )
return _inner
def a__ ( SCREAMING_SNAKE_CASE : Optional[int] , SCREAMING_SNAKE_CASE : Dict=None ):
'''simple docstring'''
def noop(SCREAMING_SNAKE_CASE : List[Any] ):
return x
if key is None:
lowerCAmelCase : int = noop
# Constants are all uppercase, they go first.
lowerCAmelCase : Dict = [obj for obj in objects if key(SCREAMING_SNAKE_CASE ).isupper()]
# Classes are not all uppercase but start with a capital, they go second.
lowerCAmelCase : List[Any] = [obj for obj in objects if key(SCREAMING_SNAKE_CASE )[0].isupper() and not key(SCREAMING_SNAKE_CASE ).isupper()]
# Functions begin with a lowercase, they go last.
lowerCAmelCase : List[Any] = [obj for obj in objects if not key(SCREAMING_SNAKE_CASE )[0].isupper()]
lowerCAmelCase : Dict = ignore_underscore(SCREAMING_SNAKE_CASE )
return sorted(SCREAMING_SNAKE_CASE , key=SCREAMING_SNAKE_CASE ) + sorted(SCREAMING_SNAKE_CASE , key=SCREAMING_SNAKE_CASE ) + sorted(SCREAMING_SNAKE_CASE , key=SCREAMING_SNAKE_CASE )
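# Illustrative ordering (not part of the original script): given
# ["load_tf_weights", "BertModel", "BERT_PRETRAINED_CONFIG_ARCHIVE_MAP"], the
# helper above returns the ALL_CAPS constant first, then the class, then the
# lowercase function, each group sorted case-insensitively ignoring underscores.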
def a__ ( SCREAMING_SNAKE_CASE : List[str] ):
'''simple docstring'''
def _replace(SCREAMING_SNAKE_CASE : List[Any] ):
lowerCAmelCase : List[str] = match.groups()[0]
if "," not in imports:
return f"""[{imports}]"""
lowerCAmelCase : Dict = [part.strip().replace("\"" , "" ) for part in imports.split("," )]
# We will have a final empty element if the line finished with a comma.
if len(keys[-1] ) == 0:
lowerCAmelCase : Any = keys[:-1]
return "[" + ", ".join([f"""\"{k}\"""" for k in sort_objects(SCREAMING_SNAKE_CASE )] ) + "]"
lowerCAmelCase : List[Any] = import_statement.split("\n" )
if len(SCREAMING_SNAKE_CASE ) > 3:
# Here we have to sort internal imports that are on several lines (one per name):
# key: [
# "object1",
# "object2",
# ...
# ]
# We may have to ignore one or two lines on each side.
lowerCAmelCase : Tuple = 2 if lines[1].strip() == "[" else 1
lowerCAmelCase : Optional[Any] = [(i, _re_strip_line.search(SCREAMING_SNAKE_CASE ).groups()[0]) for i, line in enumerate(lines[idx:-idx] )]
lowerCAmelCase : Optional[Any] = sort_objects(SCREAMING_SNAKE_CASE , key=lambda SCREAMING_SNAKE_CASE : x[1] )
lowerCAmelCase : List[str] = [lines[x[0] + idx] for x in sorted_indices]
return "\n".join(lines[:idx] + sorted_lines + lines[-idx:] )
elif len(SCREAMING_SNAKE_CASE ) == 3:
# Here we have to sort internal imports that are on one separate line:
# key: [
# "object1", "object2", ...
# ]
if _re_bracket_content.search(lines[1] ) is not None:
lowerCAmelCase : Optional[int] = _re_bracket_content.sub(_replace , lines[1] )
else:
lowerCAmelCase : List[str] = [part.strip().replace("\"" , "" ) for part in lines[1].split("," )]
# We will have a final empty element if the line finished with a comma.
if len(keys[-1] ) == 0:
lowerCAmelCase : Union[str, Any] = keys[:-1]
lowerCAmelCase : str = get_indent(lines[1] ) + ", ".join([f"""\"{k}\"""" for k in sort_objects(SCREAMING_SNAKE_CASE )] )
return "\n".join(SCREAMING_SNAKE_CASE )
else:
# Finally we have to deal with imports fitting on one line
lowerCAmelCase : Any = _re_bracket_content.sub(_replace , SCREAMING_SNAKE_CASE )
return import_statement
def a__ ( SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : Tuple=True ):
'''simple docstring'''
with open(SCREAMING_SNAKE_CASE , encoding="utf-8" ) as f:
lowerCAmelCase : Union[str, Any] = f.read()
if "_import_structure" not in code:
return
# Blocks of indent level 0
lowerCAmelCase : List[str] = split_code_in_indented_blocks(
SCREAMING_SNAKE_CASE , start_prompt="_import_structure = {" , end_prompt="if TYPE_CHECKING:" )
    # We ignore block 0 (everything until start_prompt) and the last block (everything after end_prompt).
for block_idx in range(1 , len(SCREAMING_SNAKE_CASE ) - 1 ):
# Check if the block contains some `_import_structure`s thingy to sort.
lowerCAmelCase : Tuple = main_blocks[block_idx]
lowerCAmelCase : Optional[Any] = block.split("\n" )
# Get to the start of the imports.
lowerCAmelCase : int = 0
while line_idx < len(SCREAMING_SNAKE_CASE ) and "_import_structure" not in block_lines[line_idx]:
# Skip dummy import blocks
if "import dummy" in block_lines[line_idx]:
lowerCAmelCase : Optional[Any] = len(SCREAMING_SNAKE_CASE )
else:
line_idx += 1
if line_idx >= len(SCREAMING_SNAKE_CASE ):
continue
# Ignore beginning and last line: they don't contain anything.
lowerCAmelCase : Optional[Any] = "\n".join(block_lines[line_idx:-1] )
lowerCAmelCase : Dict = get_indent(block_lines[1] )
        # Split the internal block into blocks of indent level 1.
lowerCAmelCase : Optional[Any] = split_code_in_indented_blocks(SCREAMING_SNAKE_CASE , indent_level=SCREAMING_SNAKE_CASE )
# We have two categories of import key: list or _import_structure[key].append/extend
lowerCAmelCase : Tuple = _re_direct_key if "_import_structure = {" in block_lines[0] else _re_indirect_key
# Grab the keys, but there is a trap: some lines are empty or just comments.
lowerCAmelCase : Tuple = [(pattern.search(SCREAMING_SNAKE_CASE ).groups()[0] if pattern.search(SCREAMING_SNAKE_CASE ) is not None else None) for b in internal_blocks]
# We only sort the lines with a key.
lowerCAmelCase : int = [(i, key) for i, key in enumerate(SCREAMING_SNAKE_CASE ) if key is not None]
lowerCAmelCase : Union[str, Any] = [x[0] for x in sorted(SCREAMING_SNAKE_CASE , key=lambda SCREAMING_SNAKE_CASE : x[1] )]
# We reorder the blocks by leaving empty lines/comments as they were and reorder the rest.
lowerCAmelCase : Tuple = 0
lowerCAmelCase : Any = []
for i in range(len(SCREAMING_SNAKE_CASE ) ):
if keys[i] is None:
reorderded_blocks.append(internal_blocks[i] )
else:
lowerCAmelCase : Dict = sort_objects_in_import(internal_blocks[sorted_indices[count]] )
reorderded_blocks.append(SCREAMING_SNAKE_CASE )
count += 1
# And we put our main block back together with its first and last line.
lowerCAmelCase : List[Any] = "\n".join(block_lines[:line_idx] + reorderded_blocks + [block_lines[-1]] )
if code != "\n".join(SCREAMING_SNAKE_CASE ):
if check_only:
return True
else:
print(f"""Overwriting {file}.""" )
with open(SCREAMING_SNAKE_CASE , "w" , encoding="utf-8" ) as f:
f.write("\n".join(SCREAMING_SNAKE_CASE ) )
def a__ ( SCREAMING_SNAKE_CASE : List[str]=True ):
'''simple docstring'''
lowerCAmelCase : Optional[int] = []
for root, _, files in os.walk(SCREAMING_SNAKE_CASE ):
if "__init__.py" in files:
lowerCAmelCase : Tuple = sort_imports(os.path.join(SCREAMING_SNAKE_CASE , "__init__.py" ) , check_only=SCREAMING_SNAKE_CASE )
if result:
lowerCAmelCase : Optional[Any] = [os.path.join(SCREAMING_SNAKE_CASE , "__init__.py" )]
if len(SCREAMING_SNAKE_CASE ) > 0:
raise ValueError(f"""Would overwrite {len(SCREAMING_SNAKE_CASE )} files, run `make style`.""" )
if __name__ == "__main__":
lowerCAmelCase__ = argparse.ArgumentParser()
parser.add_argument('''--check_only''', action='''store_true''', help='''Whether to only check or fix style.''')
lowerCAmelCase__ = parser.parse_args()
sort_imports_in_all_inits(check_only=args.check_only)
| 681 | 1 |
"""simple docstring"""
lowerCAmelCase__ = '''
# How to install Transformers
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
'''
lowerCAmelCase__ = [{'''type''': '''code''', '''content''': INSTALL_CONTENT}]
lowerCAmelCase__ = {
'''{processor_class}''': '''FakeProcessorClass''',
'''{model_class}''': '''FakeModelClass''',
'''{object_class}''': '''FakeObjectClass''',
}
| 681 |
"""simple docstring"""
from __future__ import annotations
import inspect
import unittest
import numpy as np
from transformers import ResNetConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFResNetForImageClassification, TFResNetModel
from transformers.models.resnet.modeling_tf_resnet import TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class SCREAMING_SNAKE_CASE__ :
"""simple docstring"""
def __init__( self , snake_case__ , snake_case__=3 , snake_case__=32 , snake_case__=3 , snake_case__=10 , snake_case__=[10, 20, 30, 40] , snake_case__=[1, 1, 2, 1] , snake_case__=True , snake_case__=True , snake_case__="relu" , snake_case__=3 , snake_case__=None , ):
"""simple docstring"""
lowerCAmelCase : Optional[Any] = parent
lowerCAmelCase : List[Any] = batch_size
lowerCAmelCase : Union[str, Any] = image_size
lowerCAmelCase : Dict = num_channels
lowerCAmelCase : List[Any] = embeddings_size
lowerCAmelCase : List[Any] = hidden_sizes
lowerCAmelCase : Optional[int] = depths
lowerCAmelCase : str = is_training
lowerCAmelCase : List[str] = use_labels
lowerCAmelCase : List[Any] = hidden_act
lowerCAmelCase : Optional[Any] = num_labels
lowerCAmelCase : Tuple = scope
lowerCAmelCase : int = len(snake_case__ )
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : Optional[int] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowerCAmelCase : Optional[Any] = None
if self.use_labels:
lowerCAmelCase : Optional[int] = ids_tensor([self.batch_size] , self.num_labels )
lowerCAmelCase : List[str] = self.get_config()
return config, pixel_values, labels
def lowercase__ ( self ):
"""simple docstring"""
return ResNetConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , image_size=self.image_size , )
def lowercase__ ( self , snake_case__ , snake_case__ , snake_case__ ):
"""simple docstring"""
lowerCAmelCase : Tuple = TFResNetModel(config=snake_case__ )
lowerCAmelCase : Union[str, Any] = model(snake_case__ )
# expected last hidden states: B, C, H // 32, W // 32
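        # (32 is the total stride of a standard ResNet: a stride-2 stem, a
        # stride-2 pooling layer, and three stride-2 stages)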
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def lowercase__ ( self , snake_case__ , snake_case__ , snake_case__ ):
"""simple docstring"""
lowerCAmelCase : Dict = self.num_labels
lowerCAmelCase : str = TFResNetForImageClassification(snake_case__ )
lowerCAmelCase : int = model(snake_case__ , labels=snake_case__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : Union[str, Any] = self.prepare_config_and_inputs()
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase : Any = config_and_inputs
lowerCAmelCase : Optional[Any] = {"pixel_values": pixel_values}
return config, inputs_dict
@require_tf
class SCREAMING_SNAKE_CASE__ ( lowercase , lowercase , unittest.TestCase ):
"""simple docstring"""
a : Any =(TFResNetModel, TFResNetForImageClassification) if is_tf_available() else ()
a : Tuple =(
{"feature-extraction": TFResNetModel, "image-classification": TFResNetForImageClassification}
if is_tf_available()
else {}
)
a : int =False
a : List[str] =False
a : Optional[int] =False
a : Union[str, Any] =False
a : Any =False
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : Optional[Any] = TFResNetModelTester(self )
lowerCAmelCase : str = ConfigTester(self , config_class=snake_case__ , has_text_modality=snake_case__ )
def lowercase__ ( self ):
"""simple docstring"""
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def lowercase__ ( self ):
"""simple docstring"""
return
@unittest.skip(reason="ResNet does not use inputs_embeds" )
def lowercase__ ( self ):
"""simple docstring"""
pass
@unittest.skip(reason="ResNet does not support input and output embeddings" )
def lowercase__ ( self ):
"""simple docstring"""
pass
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase , lowerCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCAmelCase : List[str] = model_class(snake_case__ )
lowerCAmelCase : Optional[Any] = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCAmelCase : Dict = [*signature.parameters.keys()]
lowerCAmelCase : List[Any] = ["pixel_values"]
self.assertListEqual(arg_names[:1] , snake_case__ )
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*snake_case__ )
def lowercase__ ( self ):
"""simple docstring"""
def check_hidden_states_output(snake_case__ , snake_case__ , snake_case__ ):
lowerCAmelCase : int = model_class(snake_case__ )
lowerCAmelCase : Optional[Any] = model(**self._prepare_for_class(snake_case__ , snake_case__ ) )
lowerCAmelCase : List[str] = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
lowerCAmelCase : Tuple = self.model_tester.num_stages
self.assertEqual(len(snake_case__ ) , expected_num_stages + 1 )
# ResNet's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
lowerCAmelCase , lowerCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
lowerCAmelCase : Any = ["basic", "bottleneck"]
for model_class in self.all_model_classes:
for layer_type in layers_type:
lowerCAmelCase : Optional[Any] = layer_type
lowerCAmelCase : Dict = True
check_hidden_states_output(snake_case__ , snake_case__ , snake_case__ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowerCAmelCase : List[Any] = True
check_hidden_states_output(snake_case__ , snake_case__ , snake_case__ )
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*snake_case__ )
@slow
def lowercase__ ( self ):
"""simple docstring"""
for model_name in TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCAmelCase : int = TFResNetModel.from_pretrained(snake_case__ )
self.assertIsNotNone(snake_case__ )
def a__ ( ):
'''simple docstring'''
lowerCAmelCase : str = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_tf
@require_vision
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def lowercase__ ( self ):
"""simple docstring"""
return (
AutoImageProcessor.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
if is_vision_available()
else None
)
@slow
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : int = TFResNetForImageClassification.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
lowerCAmelCase : Any = self.default_image_processor
lowerCAmelCase : Optional[Any] = prepare_img()
lowerCAmelCase : Dict = image_processor(images=snake_case__ , return_tensors="tf" )
# forward pass
lowerCAmelCase : str = model(**snake_case__ )
# verify the logits
lowerCAmelCase : str = tf.TensorShape((1, 1_000) )
self.assertEqual(outputs.logits.shape , snake_case__ )
lowerCAmelCase : str = tf.constant([-11.1069, -9.7877, -8.3777] )
self.assertTrue(np.allclose(outputs.logits[0, :3].numpy() , snake_case__ , atol=1e-4 ) )
| 681 | 1 |
"""simple docstring"""
from typing import Optional
from torch import nn
from .transformer_ad import TransformeraDModel, TransformeraDModelOutput
class SCREAMING_SNAKE_CASE__ ( nn.Module ):
"""simple docstring"""
def __init__( self , snake_case__ = 16 , snake_case__ = 88 , snake_case__ = None , snake_case__ = 1 , snake_case__ = 0.0 , snake_case__ = 32 , snake_case__ = None , snake_case__ = False , snake_case__ = None , snake_case__ = None , snake_case__ = "geglu" , snake_case__ = None , ):
"""simple docstring"""
super().__init__()
lowerCAmelCase : Union[str, Any] = nn.ModuleList(
[
TransformeraDModel(
num_attention_heads=snake_case__ , attention_head_dim=snake_case__ , in_channels=snake_case__ , num_layers=snake_case__ , dropout=snake_case__ , norm_num_groups=snake_case__ , cross_attention_dim=snake_case__ , attention_bias=snake_case__ , sample_size=snake_case__ , num_vector_embeds=snake_case__ , activation_fn=snake_case__ , num_embeds_ada_norm=snake_case__ , )
for _ in range(2 )
] )
# Variables that can be set by a pipeline:
# The ratio of transformer1 to transformer2's output states to be combined during inference
lowerCAmelCase : Optional[int] = 0.5
# The shape of `encoder_hidden_states` is expected to be
# `(batch_size, condition_lengths[0]+condition_lengths[1], num_features)`
lowerCAmelCase : str = [77, 257]
# Which transformer to use to encode which condition.
# E.g. `(1, 0)` means that we'll use `transformers[1](conditions[0])` and `transformers[0](conditions[1])`
lowerCAmelCase : str = [1, 0]
def lowercase__ ( self , snake_case__ , snake_case__ , snake_case__=None , snake_case__=None , snake_case__=None , snake_case__ = True , ):
"""simple docstring"""
lowerCAmelCase : Optional[int] = hidden_states
lowerCAmelCase : Optional[Any] = []
lowerCAmelCase : Union[str, Any] = 0
# attention_mask is not used yet
for i in range(2 ):
# for each of the two transformers, pass the corresponding condition tokens
lowerCAmelCase : Any = encoder_hidden_states[:, tokens_start : tokens_start + self.condition_lengths[i]]
lowerCAmelCase : Union[str, Any] = self.transformer_index_for_condition[i]
lowerCAmelCase : Any = self.transformers[transformer_index](
snake_case__ , encoder_hidden_states=snake_case__ , timestep=snake_case__ , cross_attention_kwargs=snake_case__ , return_dict=snake_case__ , )[0]
encoded_states.append(encoded_state - input_states )
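            # keep only the residual contributed by this transformer; the shared
            # input is re-added once after the two branches are mixed below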
tokens_start += self.condition_lengths[i]
lowerCAmelCase : Any = encoded_states[0] * self.mix_ratio + encoded_states[1] * (1 - self.mix_ratio)
lowerCAmelCase : str = output_states + input_states
if not return_dict:
return (output_states,)
return TransformeraDModelOutput(sample=snake_case__ )
| 681 |
"""simple docstring"""
import random
from .binary_exp_mod import bin_exp_mod
def a__ ( SCREAMING_SNAKE_CASE : Any , SCREAMING_SNAKE_CASE : int=1_0_0_0 ):
'''simple docstring'''
if n < 2:
return False
if n % 2 == 0:
return n == 2
# this means n is odd
lowerCAmelCase : int = n - 1
lowerCAmelCase : Optional[int] = 0
while d % 2 == 0:
d /= 2
exp += 1
# n - 1=d*(2**exp)
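    # e.g. for n = 13: n - 1 = 12 = 3 * 2**2, so d = 3 and exp = 2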
lowerCAmelCase : Optional[Any] = 0
while count < prec:
lowerCAmelCase : List[str] = random.randint(2 , n - 1 )
lowerCAmelCase : Tuple = bin_exp_mod(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
if b != 1:
lowerCAmelCase : List[str] = True
for _ in range(SCREAMING_SNAKE_CASE ):
if b == n - 1:
lowerCAmelCase : List[str] = False
break
lowerCAmelCase : Optional[Any] = b * b
b %= n
if flag:
return False
count += 1
return True
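# Each Miller-Rabin round rejects a composite with probability at least 3/4, so
# after `prec` passing rounds the error probability is at most (1/4) ** prec.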
if __name__ == "__main__":
lowerCAmelCase__ = abs(int(input('''Enter bound : ''').strip()))
print('''Here\'s the list of primes:''')
print(''', '''.join(str(i) for i in range(n + 1) if is_prime_big(i)))
| 681 | 1 |
"""simple docstring"""
from __future__ import annotations
import numpy as np
from numpy import floataa
from numpy.typing import NDArray
def a__ ( SCREAMING_SNAKE_CASE : NDArray[floataa] , SCREAMING_SNAKE_CASE : NDArray[floataa] , SCREAMING_SNAKE_CASE : list[int] , SCREAMING_SNAKE_CASE : int , ):
'''simple docstring'''
lowerCAmelCase , lowerCAmelCase : List[Any] = coefficient_matrix.shape
lowerCAmelCase , lowerCAmelCase : Optional[int] = constant_matrix.shape
if rowsa != colsa:
lowerCAmelCase : Optional[Any] = f"""Coefficient matrix dimensions must be nxn but received {rowsa}x{colsa}"""
raise ValueError(SCREAMING_SNAKE_CASE )
if colsa != 1:
lowerCAmelCase : List[str] = f"""Constant matrix must be nx1 but received {rowsa}x{colsa}"""
raise ValueError(SCREAMING_SNAKE_CASE )
    if rowsa != rowsb:
lowerCAmelCase : Optional[Any] = (
"Coefficient and constant matrices dimensions must be nxn and nx1 but "
f"""received {rowsa}x{colsa} and {rowsa}x{colsa}"""
)
raise ValueError(SCREAMING_SNAKE_CASE )
if len(SCREAMING_SNAKE_CASE ) != rowsa:
lowerCAmelCase : Tuple = (
"Number of initial values must be equal to number of rows in coefficient "
f"""matrix but received {len(SCREAMING_SNAKE_CASE )} and {rowsa}"""
)
raise ValueError(SCREAMING_SNAKE_CASE )
if iterations <= 0:
raise ValueError("Iterations must be at least 1" )
lowerCAmelCase : NDArray[floataa] = np.concatenate(
(coefficient_matrix, constant_matrix) , axis=1 )
lowerCAmelCase , lowerCAmelCase : str = table.shape
strictly_diagonally_dominant(SCREAMING_SNAKE_CASE )
# Iterates the whole matrix for given number of times
for _ in range(SCREAMING_SNAKE_CASE ):
lowerCAmelCase : Any = []
for row in range(SCREAMING_SNAKE_CASE ):
lowerCAmelCase : Optional[Any] = 0
for col in range(SCREAMING_SNAKE_CASE ):
if col == row:
lowerCAmelCase : Any = table[row][col]
elif col == cols - 1:
lowerCAmelCase : Optional[Any] = table[row][col]
else:
temp += (-1) * table[row][col] * init_val[col]
lowerCAmelCase : Tuple = (temp + val) / denom
new_val.append(SCREAMING_SNAKE_CASE )
lowerCAmelCase : Tuple = new_val
return [float(SCREAMING_SNAKE_CASE ) for i in new_val]
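# Illustrative call (upstream this function is named jacobi_iteration_method; the
# values are chosen only so the matrix is strictly diagonally dominant):
#   coefficient = np.array([[4.0, 1.0, 1.0], [1.0, 5.0, 2.0], [1.0, 2.0, 4.0]])
#   constant = np.array([[2.0], [-6.0], [-4.0]])
#   jacobi_iteration_method(coefficient, constant, [0.5, -0.5, -0.5], 3)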
def a__ ( SCREAMING_SNAKE_CASE : NDArray[floataa] ):
'''simple docstring'''
lowerCAmelCase , lowerCAmelCase : List[Any] = table.shape
lowerCAmelCase : List[str] = True
for i in range(0 , SCREAMING_SNAKE_CASE ):
lowerCAmelCase : int = 0
for j in range(0 , cols - 1 ):
if i == j:
continue
else:
total += table[i][j]
if table[i][i] <= total:
raise ValueError("Coefficient matrix is not strictly diagonally dominant" )
return is_diagonally_dominant
# Test Cases
if __name__ == "__main__":
import doctest
doctest.testmod()
| 681 |
"""simple docstring"""
# DISCLAIMER: This file is strongly influenced by https://github.com/ermongroup/ddim
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import flax
import jax
import jax.numpy as jnp
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils_flax import (
CommonSchedulerState,
FlaxKarrasDiffusionSchedulers,
FlaxSchedulerMixin,
FlaxSchedulerOutput,
add_noise_common,
get_velocity_common,
)
@flax.struct.dataclass
class SCREAMING_SNAKE_CASE__ :
"""simple docstring"""
a : CommonSchedulerState
# setable values
a : jnp.ndarray
a : jnp.ndarray
a : Optional[int] =None
@classmethod
def lowercase__ ( cls , snake_case__ , snake_case__ , snake_case__ ):
"""simple docstring"""
return cls(common=snake_case__ , init_noise_sigma=snake_case__ , timesteps=snake_case__ )
@dataclass
class SCREAMING_SNAKE_CASE__ ( lowercase ):
"""simple docstring"""
a : DDPMSchedulerState
class SCREAMING_SNAKE_CASE__ ( lowercase , lowercase ):
"""simple docstring"""
a : Union[str, Any] =[e.name for e in FlaxKarrasDiffusionSchedulers]
a : jnp.dtype
@property
def lowercase__ ( self ):
"""simple docstring"""
return True
@register_to_config
def __init__( self , snake_case__ = 1_000 , snake_case__ = 0.0001 , snake_case__ = 0.02 , snake_case__ = "linear" , snake_case__ = None , snake_case__ = "fixed_small" , snake_case__ = True , snake_case__ = "epsilon" , snake_case__ = jnp.floataa , ):
"""simple docstring"""
lowerCAmelCase : Any = dtype
def lowercase__ ( self , snake_case__ = None ):
"""simple docstring"""
if common is None:
lowerCAmelCase : Dict = CommonSchedulerState.create(self )
# standard deviation of the initial noise distribution
lowerCAmelCase : str = jnp.array(1.0 , dtype=self.dtype )
lowerCAmelCase : Any = jnp.arange(0 , self.config.num_train_timesteps ).round()[::-1]
return DDPMSchedulerState.create(
common=snake_case__ , init_noise_sigma=snake_case__ , timesteps=snake_case__ , )
def lowercase__ ( self , snake_case__ , snake_case__ , snake_case__ = None ):
"""simple docstring"""
return sample
def lowercase__ ( self , snake_case__ , snake_case__ , snake_case__ = () ):
"""simple docstring"""
lowerCAmelCase : List[Any] = self.config.num_train_timesteps // num_inference_steps
# creates integer timesteps by multiplying by ratio
# rounding to avoid issues when num_inference_step is power of 3
lowerCAmelCase : Any = (jnp.arange(0 , snake_case__ ) * step_ratio).round()[::-1]
return state.replace(
num_inference_steps=snake_case__ , timesteps=snake_case__ , )
def lowercase__ ( self , snake_case__ , snake_case__ , snake_case__=None , snake_case__=None ):
"""simple docstring"""
lowerCAmelCase : Optional[Any] = state.common.alphas_cumprod[t]
lowerCAmelCase : List[str] = jnp.where(t > 0 , state.common.alphas_cumprod[t - 1] , jnp.array(1.0 , dtype=self.dtype ) )
# For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf)
# and sample from it to get previous sample
# x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample
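        # i.e. the posterior variance beta_tilde_t = (1 - alphabar_{t-1}) / (1 - alphabar_t) * beta_t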
lowerCAmelCase : Union[str, Any] = (1 - alpha_prod_t_prev) / (1 - alpha_prod_t) * state.common.betas[t]
if variance_type is None:
lowerCAmelCase : List[str] = self.config.variance_type
# hacks - were probably added for training stability
if variance_type == "fixed_small":
lowerCAmelCase : List[Any] = jnp.clip(snake_case__ , a_min=1e-20 )
# for rl-diffuser https://arxiv.org/abs/2205.09991
elif variance_type == "fixed_small_log":
lowerCAmelCase : List[str] = jnp.log(jnp.clip(snake_case__ , a_min=1e-20 ) )
elif variance_type == "fixed_large":
lowerCAmelCase : Optional[int] = state.common.betas[t]
elif variance_type == "fixed_large_log":
# Glide max_log
lowerCAmelCase : List[str] = jnp.log(state.common.betas[t] )
elif variance_type == "learned":
return predicted_variance
elif variance_type == "learned_range":
lowerCAmelCase : List[str] = variance
lowerCAmelCase : Dict = state.common.betas[t]
lowerCAmelCase : Optional[Any] = (predicted_variance + 1) / 2
lowerCAmelCase : List[str] = frac * max_log + (1 - frac) * min_log
return variance
def lowercase__ ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ = None , snake_case__ = True , ):
"""simple docstring"""
lowerCAmelCase : Optional[Any] = timestep
if key is None:
lowerCAmelCase : Tuple = jax.random.PRNGKey(0 )
if model_output.shape[1] == sample.shape[1] * 2 and self.config.variance_type in ["learned", "learned_range"]:
lowerCAmelCase , lowerCAmelCase : Optional[Any] = jnp.split(snake_case__ , sample.shape[1] , axis=1 )
else:
lowerCAmelCase : Tuple = None
# 1. compute alphas, betas
lowerCAmelCase : Optional[int] = state.common.alphas_cumprod[t]
lowerCAmelCase : Optional[int] = jnp.where(t > 0 , state.common.alphas_cumprod[t - 1] , jnp.array(1.0 , dtype=self.dtype ) )
lowerCAmelCase : Dict = 1 - alpha_prod_t
lowerCAmelCase : Any = 1 - alpha_prod_t_prev
# 2. compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
if self.config.prediction_type == "epsilon":
lowerCAmelCase : Union[str, Any] = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
elif self.config.prediction_type == "sample":
lowerCAmelCase : List[Any] = model_output
elif self.config.prediction_type == "v_prediction":
lowerCAmelCase : Tuple = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output
else:
raise ValueError(
f"""prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample` """
" for the FlaxDDPMScheduler." )
# 3. Clip "predicted x_0"
if self.config.clip_sample:
lowerCAmelCase : Optional[int] = jnp.clip(snake_case__ , -1 , 1 )
# 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
lowerCAmelCase : List[Any] = (alpha_prod_t_prev ** 0.5 * state.common.betas[t]) / beta_prod_t
lowerCAmelCase : List[str] = state.common.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t
# 5. Compute predicted previous sample µ_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
lowerCAmelCase : int = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample
# 6. Add noise
def random_variance():
lowerCAmelCase : Tuple = jax.random.split(snake_case__ , num=1 )
lowerCAmelCase : str = jax.random.normal(snake_case__ , shape=model_output.shape , dtype=self.dtype )
return (self._get_variance(snake_case__ , snake_case__ , predicted_variance=snake_case__ ) ** 0.5) * noise
lowerCAmelCase : Union[str, Any] = jnp.where(t > 0 , random_variance() , jnp.zeros(model_output.shape , dtype=self.dtype ) )
lowerCAmelCase : Union[str, Any] = pred_prev_sample + variance
if not return_dict:
return (pred_prev_sample, state)
return FlaxDDPMSchedulerOutput(prev_sample=snake_case__ , state=snake_case__ )
def lowercase__ ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , ):
"""simple docstring"""
return add_noise_common(state.common , snake_case__ , snake_case__ , snake_case__ )
def lowercase__ ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , ):
"""simple docstring"""
return get_velocity_common(state.common , snake_case__ , snake_case__ , snake_case__ )
def __len__( self ):
"""simple docstring"""
return self.config.num_train_timesteps
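# Minimal usage sketch (upstream this class is FlaxDDPMScheduler; values are
# illustrative and not executed here):
#   scheduler = FlaxDDPMScheduler(num_train_timesteps=1_000)
#   state = scheduler.create_state()
#   state = scheduler.set_timesteps(state, num_inference_steps=50)
#   output = scheduler.step(state, model_output, t, sample, key=rng)
#   x_prev, state = output.prev_sample, output.state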
| 681 | 1 |
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import (
TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaubertConfig,
TFFlaubertForMultipleChoice,
TFFlaubertForQuestionAnsweringSimple,
TFFlaubertForSequenceClassification,
TFFlaubertForTokenClassification,
TFFlaubertModel,
TFFlaubertWithLMHeadModel,
)
class SCREAMING_SNAKE_CASE__ :
"""simple docstring"""
def __init__( self , snake_case__ , ):
"""simple docstring"""
lowerCAmelCase : Optional[Any] = parent
lowerCAmelCase : Optional[Any] = 13
lowerCAmelCase : Optional[Any] = 7
lowerCAmelCase : Any = True
lowerCAmelCase : Optional[int] = True
lowerCAmelCase : int = True
lowerCAmelCase : Dict = True
lowerCAmelCase : Optional[Any] = True
lowerCAmelCase : str = False
lowerCAmelCase : Union[str, Any] = False
lowerCAmelCase : Optional[int] = False
lowerCAmelCase : Union[str, Any] = 2
lowerCAmelCase : str = 99
lowerCAmelCase : Union[str, Any] = 0
lowerCAmelCase : Union[str, Any] = 32
lowerCAmelCase : str = 2
lowerCAmelCase : Dict = 4
lowerCAmelCase : str = 0.1
lowerCAmelCase : Optional[Any] = 0.1
lowerCAmelCase : List[str] = 512
lowerCAmelCase : List[str] = 16
lowerCAmelCase : List[Any] = 2
lowerCAmelCase : Tuple = 0.02
lowerCAmelCase : List[str] = 3
lowerCAmelCase : Dict = 4
lowerCAmelCase : Any = "last"
lowerCAmelCase : List[Any] = True
lowerCAmelCase : str = None
lowerCAmelCase : List[str] = 0
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCAmelCase : Tuple = random_attention_mask([self.batch_size, self.seq_length] , dtype=tf.floataa )
lowerCAmelCase : Optional[Any] = None
if self.use_input_lengths:
lowerCAmelCase : List[Any] = (
ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2
) # small variation of seq_length
lowerCAmelCase : Dict = None
if self.use_token_type_ids:
lowerCAmelCase : Any = ids_tensor([self.batch_size, self.seq_length] , self.n_langs )
lowerCAmelCase : Tuple = None
lowerCAmelCase : List[Any] = None
lowerCAmelCase : str = None
if self.use_labels:
lowerCAmelCase : str = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCAmelCase : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowerCAmelCase : Tuple = ids_tensor([self.batch_size] , 2 , dtype=tf.floataa )
lowerCAmelCase : Any = ids_tensor([self.batch_size] , self.num_choices )
lowerCAmelCase : Dict = FlaubertConfig(
vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , bos_token_id=self.bos_token_id , )
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
def lowercase__ ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , ):
"""simple docstring"""
lowerCAmelCase : Union[str, Any] = TFFlaubertModel(config=snake_case__ )
lowerCAmelCase : Tuple = {"input_ids": input_ids, "lengths": input_lengths, "langs": token_type_ids}
lowerCAmelCase : Dict = model(snake_case__ )
lowerCAmelCase : int = [input_ids, input_mask]
lowerCAmelCase : Dict = model(snake_case__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowercase__ ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , ):
"""simple docstring"""
lowerCAmelCase : Optional[Any] = TFFlaubertWithLMHeadModel(snake_case__ )
lowerCAmelCase : Tuple = {"input_ids": input_ids, "lengths": input_lengths, "langs": token_type_ids}
lowerCAmelCase : List[str] = model(snake_case__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowercase__ ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , ):
"""simple docstring"""
lowerCAmelCase : Union[str, Any] = TFFlaubertForQuestionAnsweringSimple(snake_case__ )
lowerCAmelCase : Dict = {"input_ids": input_ids, "lengths": input_lengths}
lowerCAmelCase : str = model(snake_case__ )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def lowercase__ ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , ):
"""simple docstring"""
lowerCAmelCase : List[Any] = TFFlaubertForSequenceClassification(snake_case__ )
lowerCAmelCase : int = {"input_ids": input_ids, "lengths": input_lengths}
lowerCAmelCase : List[Any] = model(snake_case__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def lowercase__ ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , ):
"""simple docstring"""
lowerCAmelCase : Optional[int] = self.num_labels
lowerCAmelCase : Any = TFFlaubertForTokenClassification(config=snake_case__ )
lowerCAmelCase : List[Any] = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
lowerCAmelCase : List[str] = model(snake_case__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def lowercase__ ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , ):
"""simple docstring"""
lowerCAmelCase : Dict = self.num_choices
lowerCAmelCase : Dict = TFFlaubertForMultipleChoice(config=snake_case__ )
lowerCAmelCase : Dict = tf.tile(tf.expand_dims(snake_case__ , 1 ) , (1, self.num_choices, 1) )
lowerCAmelCase : str = tf.tile(tf.expand_dims(snake_case__ , 1 ) , (1, self.num_choices, 1) )
lowerCAmelCase : Tuple = tf.tile(tf.expand_dims(snake_case__ , 1 ) , (1, self.num_choices, 1) )
lowerCAmelCase : Union[str, Any] = {
"input_ids": multiple_choice_inputs_ids,
"attention_mask": multiple_choice_input_mask,
"token_type_ids": multiple_choice_token_type_ids,
}
lowerCAmelCase : List[str] = model(snake_case__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : List[str] = self.prepare_config_and_inputs()
        lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase : Optional[int] = config_and_inputs
lowerCAmelCase : str = {
"input_ids": input_ids,
"token_type_ids": token_type_ids,
"langs": token_type_ids,
"lengths": input_lengths,
}
return config, inputs_dict
@require_tf
class SCREAMING_SNAKE_CASE__ ( lowercase , lowercase , unittest.TestCase ):
"""simple docstring"""
a : Tuple =(
(
TFFlaubertModel,
TFFlaubertWithLMHeadModel,
TFFlaubertForSequenceClassification,
TFFlaubertForQuestionAnsweringSimple,
TFFlaubertForTokenClassification,
TFFlaubertForMultipleChoice,
)
if is_tf_available()
else ()
)
a : List[Any] =(
(TFFlaubertWithLMHeadModel,) if is_tf_available() else ()
) # TODO (PVP): Check other models whether language generation is also applicable
a : List[Any] =(
{
"feature-extraction": TFFlaubertModel,
"fill-mask": TFFlaubertWithLMHeadModel,
"question-answering": TFFlaubertForQuestionAnsweringSimple,
"text-classification": TFFlaubertForSequenceClassification,
"token-classification": TFFlaubertForTokenClassification,
"zero-shot": TFFlaubertForSequenceClassification,
}
if is_tf_available()
else {}
)
a : str =False
a : List[str] =False
def lowercase__ ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ ):
"""simple docstring"""
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith("Fast" )
):
            # `QAPipelineTests` fails for a few models when the slower tokenizers are used.
            # (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : Dict = TFFlaubertModelTester(self )
lowerCAmelCase : Any = ConfigTester(self , config_class=snake_case__ , emb_dim=37 )
def lowercase__ ( self ):
"""simple docstring"""
self.config_tester.run_common_tests()
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_model(*snake_case__ )
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_lm_head(*snake_case__ )
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_qa(*snake_case__ )
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_sequence_classif(*snake_case__ )
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_for_token_classification(*snake_case__ )
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_for_multiple_choice(*snake_case__ )
@slow
def lowercase__ ( self ):
"""simple docstring"""
for model_name in TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCAmelCase : List[Any] = TFFlaubertModel.from_pretrained(snake_case__ )
self.assertIsNotNone(snake_case__ )
@require_tf
@require_sentencepiece
@require_tokenizers
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
"""simple docstring"""
@slow
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : Union[str, Any] = TFFlaubertModel.from_pretrained("jplu/tf-flaubert-small-cased" )
lowerCAmelCase : str = tf.convert_to_tensor(
[[0, 158, 735, 2_592, 1_424, 6_727, 82, 1]] , dtype=tf.intaa , ) # "J'aime flaubert !"
lowerCAmelCase : Dict = model(snake_case__ )[0]
lowerCAmelCase : List[Any] = tf.TensorShape((1, 8, 512) )
self.assertEqual(output.shape , snake_case__ )
# compare the actual values for a slice.
lowerCAmelCase : str = tf.convert_to_tensor(
[
[
[-1.8768773, -1.566555, 0.27072418],
[-1.6920038, -0.5873505, 1.9329599],
[-2.9563985, -1.6993835, 1.7972052],
]
] , dtype=tf.floataa , )
self.assertTrue(np.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1e-4 ) )
| 681 |
"""simple docstring"""
import argparse
from omegaconf import OmegaConf
import torch
from diffusers import DDIMScheduler, LDMPipeline, UNetLDMModel, VQModel
def a__ ( SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : Tuple , SCREAMING_SNAKE_CASE : Optional[int] ):
'''simple docstring'''
lowerCAmelCase : Tuple = OmegaConf.load(SCREAMING_SNAKE_CASE )
lowerCAmelCase : Union[str, Any] = torch.load(SCREAMING_SNAKE_CASE , map_location="cpu" )["model"]
lowerCAmelCase : int = list(state_dict.keys() )
# extract state_dict for VQVAE
lowerCAmelCase : Tuple = {}
lowerCAmelCase : Dict = "first_stage_model."
for key in keys:
if key.startswith(SCREAMING_SNAKE_CASE ):
lowerCAmelCase : List[str] = state_dict[key]
# extract state_dict for UNetLDM
lowerCAmelCase : List[Any] = {}
lowerCAmelCase : Tuple = "model.diffusion_model."
for key in keys:
if key.startswith(SCREAMING_SNAKE_CASE ):
lowerCAmelCase : str = state_dict[key]
lowerCAmelCase : List[str] = config.model.params.first_stage_config.params
lowerCAmelCase : List[Any] = config.model.params.unet_config.params
lowerCAmelCase : Union[str, Any] = VQModel(**SCREAMING_SNAKE_CASE ).eval()
vqvae.load_state_dict(SCREAMING_SNAKE_CASE )
lowerCAmelCase : Union[str, Any] = UNetLDMModel(**SCREAMING_SNAKE_CASE ).eval()
unet.load_state_dict(SCREAMING_SNAKE_CASE )
    lowerCAmelCase : Optional[Any] = DDIMScheduler(
        num_train_timesteps=config.model.params.timesteps , beta_schedule="scaled_linear" , beta_start=config.model.params.linear_start , beta_end=config.model.params.linear_end , clip_sample=SCREAMING_SNAKE_CASE , )
lowerCAmelCase : Tuple = LDMPipeline(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
pipeline.save_pretrained(SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
lowerCAmelCase__ = argparse.ArgumentParser()
parser.add_argument('''--checkpoint_path''', type=str, required=True)
parser.add_argument('''--config_path''', type=str, required=True)
parser.add_argument('''--output_path''', type=str, required=True)
lowerCAmelCase__ = parser.parse_args()
convert_ldm_original(args.checkpoint_path, args.config_path, args.output_path)
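# Example invocation (added for illustration; the paths and script name are
# hypothetical, but the flags are exactly the ones registered by the argparse
# setup above):
#
#   python convert_ldm_original.py \
#       --checkpoint_path /path/to/ldm.ckpt \
#       --config_path /path/to/config.yaml \
#       --output_path ./converted_pipeline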
| 681 | 1 |
"""simple docstring"""
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import flax
import jax.numpy as jnp
from jax import random
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .scheduling_utils_flax import FlaxSchedulerMixin
@flax.struct.dataclass
class SCREAMING_SNAKE_CASE__ :
"""simple docstring"""
a : Optional[int] =None
a : Optional[jnp.ndarray] =None
a : Optional[jnp.ndarray] =None # sigma(t_i)
@classmethod
def lowercase__ ( cls ):
"""simple docstring"""
return cls()
@dataclass
class SCREAMING_SNAKE_CASE__ ( lowercase ):
"""simple docstring"""
a : jnp.ndarray
a : jnp.ndarray
a : KarrasVeSchedulerState
class SCREAMING_SNAKE_CASE__ ( lowercase , lowercase ):
"""simple docstring"""
@property
def lowercase__ ( self ):
"""simple docstring"""
return True
@register_to_config
def __init__( self , snake_case__ = 0.02 , snake_case__ = 100 , snake_case__ = 1.007 , snake_case__ = 80 , snake_case__ = 0.05 , snake_case__ = 50 , ):
"""simple docstring"""
pass
def lowercase__ ( self ):
"""simple docstring"""
return KarrasVeSchedulerState.create()
def lowercase__ ( self , snake_case__ , snake_case__ , snake_case__ = () ):
"""simple docstring"""
lowerCAmelCase : Optional[Any] = jnp.arange(0 , snake_case__ )[::-1].copy()
        # Geometric (log-linear) interpolation from sigma_max^2 down to
        # sigma_min^2 over the reversed timestep grid (Karras et al., 2022).
        lowerCAmelCase : str = [
            (
                self.config.sigma_max**2
                * (self.config.sigma_min**2 / self.config.sigma_max**2) ** (i / (num_inference_steps - 1))
            )
            for i in timesteps
        ]
return state.replace(
num_inference_steps=snake_case__ , schedule=jnp.array(snake_case__ , dtype=jnp.floataa ) , timesteps=snake_case__ , )
def lowercase__ ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , ):
"""simple docstring"""
if self.config.s_min <= sigma <= self.config.s_max:
lowerCAmelCase : Optional[Any] = min(self.config.s_churn / state.num_inference_steps , 2**0.5 - 1 )
else:
lowerCAmelCase : int = 0
# sample eps ~ N(0, S_noise^2 * I)
lowerCAmelCase : int = random.split(snake_case__ , num=1 )
lowerCAmelCase : Union[str, Any] = self.config.s_noise * random.normal(key=snake_case__ , shape=sample.shape )
lowerCAmelCase : Optional[int] = sigma + gamma * sigma
lowerCAmelCase : Dict = sample + ((sigma_hat**2 - sigma**2) ** 0.5 * eps)
return sample_hat, sigma_hat
def lowercase__ ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ = True , ):
"""simple docstring"""
lowerCAmelCase : str = sample_hat + sigma_hat * model_output
lowerCAmelCase : Tuple = (sample_hat - pred_original_sample) / sigma_hat
lowerCAmelCase : List[Any] = sample_hat + (sigma_prev - sigma_hat) * derivative
if not return_dict:
return (sample_prev, derivative, state)
return FlaxKarrasVeOutput(prev_sample=snake_case__ , derivative=snake_case__ , state=snake_case__ )
def lowercase__ ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ = True , ):
"""simple docstring"""
lowerCAmelCase : int = sample_prev + sigma_prev * model_output
lowerCAmelCase : Dict = (sample_prev - pred_original_sample) / sigma_prev
lowerCAmelCase : int = sample_hat + (sigma_prev - sigma_hat) * (0.5 * derivative + 0.5 * derivative_corr)
if not return_dict:
return (sample_prev, derivative, state)
return FlaxKarrasVeOutput(prev_sample=snake_case__ , derivative=snake_case__ , state=snake_case__ )
def lowercase__ ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ ):
"""simple docstring"""
raise NotImplementedError()
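# Hedged sampling sketch (added for illustration). The scheduler implements
# Algorithm 2 of Karras et al. (2022): churn the sample up to sigma_hat, take
# an Euler step, then apply a second-order correction unless the next sigma
# is zero. Method names follow the upstream FlaxKarrasVeScheduler, which the
# obfuscated methods above correspond to; the denoiser outputs are placeholders.
#
#   state = scheduler.set_timesteps(scheduler.create_state(), num_inference_steps)
#   for t in state.timesteps:
#       sigma = state.schedule[t]
#       sigma_prev = state.schedule[t - 1] if t > 0 else 0
#       sample_hat, sigma_hat = scheduler.add_noise_to_input(state, sample, sigma, key)
#       out = scheduler.step(state, model_output, sigma_hat, sigma_prev, sample_hat)
#       if sigma_prev != 0:
#           out = scheduler.step_correct(state, model_output_2, sigma_hat, sigma_prev,
#                                        sample_hat, out.prev_sample, out.derivative)
#       sample = out.prev_sample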
| 681 |
"""simple docstring"""
def a__ ( SCREAMING_SNAKE_CASE : int = 1_0_0_0 ):
'''simple docstring'''
return sum(e for e in range(3 , SCREAMING_SNAKE_CASE ) if e % 3 == 0 or e % 5 == 0 )
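# Closed-form cross-check (added for illustration, not part of the original
# solution): by inclusion-exclusion the answer equals S(3) + S(5) - S(15),
# where S(k) = k * m * (m + 1) // 2 sums the multiples of k below n,
# with m = (n - 1) // k. For n = 1_000 this gives 233_168.
def solution_closed_form(n: int = 1_0_0_0) -> int:
    def s(k: int) -> int:
        m = (n - 1) // k
        return k * m * (m + 1) // 2

    return s(3) + s(5) - s(1_5)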
if __name__ == "__main__":
print(F"{solution() = }")
| 681 | 1 |
"""simple docstring"""
import argparse
import json
import torch
from diffusers import DDPMScheduler, LDMPipeline, UNetaDModel, VQModel
def a__ ( SCREAMING_SNAKE_CASE : Optional[int] , SCREAMING_SNAKE_CASE : List[str]=1 ):
'''simple docstring'''
if n_shave_prefix_segments >= 0:
return ".".join(path.split("." )[n_shave_prefix_segments:] )
else:
return ".".join(path.split("." )[:n_shave_prefix_segments] )
def a__ ( SCREAMING_SNAKE_CASE : Union[str, Any] , SCREAMING_SNAKE_CASE : List[Any]=0 ):
'''simple docstring'''
lowerCAmelCase : Optional[Any] = []
for old_item in old_list:
lowerCAmelCase : List[str] = old_item.replace("in_layers.0" , "norm1" )
lowerCAmelCase : int = new_item.replace("in_layers.2" , "conv1" )
lowerCAmelCase : Optional[Any] = new_item.replace("out_layers.0" , "norm2" )
lowerCAmelCase : Any = new_item.replace("out_layers.3" , "conv2" )
lowerCAmelCase : List[Any] = new_item.replace("emb_layers.1" , "time_emb_proj" )
lowerCAmelCase : List[Any] = new_item.replace("skip_connection" , "conv_shortcut" )
lowerCAmelCase : str = shave_segments(SCREAMING_SNAKE_CASE , n_shave_prefix_segments=SCREAMING_SNAKE_CASE )
mapping.append({"old": old_item, "new": new_item} )
return mapping
def a__ ( SCREAMING_SNAKE_CASE : Any , SCREAMING_SNAKE_CASE : List[Any]=0 ):
'''simple docstring'''
lowerCAmelCase : Dict = []
for old_item in old_list:
lowerCAmelCase : List[str] = old_item
lowerCAmelCase : Dict = new_item.replace("norm.weight" , "group_norm.weight" )
lowerCAmelCase : Union[str, Any] = new_item.replace("norm.bias" , "group_norm.bias" )
lowerCAmelCase : List[Any] = new_item.replace("proj_out.weight" , "proj_attn.weight" )
lowerCAmelCase : List[str] = new_item.replace("proj_out.bias" , "proj_attn.bias" )
lowerCAmelCase : Optional[int] = shave_segments(SCREAMING_SNAKE_CASE , n_shave_prefix_segments=SCREAMING_SNAKE_CASE )
mapping.append({"old": old_item, "new": new_item} )
return mapping
def a__ ( SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : Any , SCREAMING_SNAKE_CASE : List[Any] , SCREAMING_SNAKE_CASE : List[str]=None , SCREAMING_SNAKE_CASE : Union[str, Any]=None , SCREAMING_SNAKE_CASE : Optional[Any]=None ):
'''simple docstring'''
assert isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ), "Paths should be a list of dicts containing 'old' and 'new' keys."
# Splits the attention layers into three variables.
if attention_paths_to_split is not None:
for path, path_map in attention_paths_to_split.items():
lowerCAmelCase : Any = old_checkpoint[path]
lowerCAmelCase : List[Any] = old_tensor.shape[0] // 3
lowerCAmelCase : Union[str, Any] = (-1, channels) if len(old_tensor.shape ) == 3 else (-1)
lowerCAmelCase : str = old_tensor.shape[0] // config["num_head_channels"] // 3
lowerCAmelCase : List[Any] = old_tensor.reshape((num_heads, 3 * channels // num_heads) + old_tensor.shape[1:] )
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase : Dict = old_tensor.split(channels // num_heads , dim=1 )
lowerCAmelCase : int = query.reshape(SCREAMING_SNAKE_CASE )
lowerCAmelCase : Any = key.reshape(SCREAMING_SNAKE_CASE )
lowerCAmelCase : Dict = value.reshape(SCREAMING_SNAKE_CASE )
for path in paths:
lowerCAmelCase : Union[str, Any] = path["new"]
# These have already been assigned
if attention_paths_to_split is not None and new_path in attention_paths_to_split:
continue
# Global renaming happens here
lowerCAmelCase : Union[str, Any] = new_path.replace("middle_block.0" , "mid_block.resnets.0" )
lowerCAmelCase : Optional[Any] = new_path.replace("middle_block.1" , "mid_block.attentions.0" )
lowerCAmelCase : Union[str, Any] = new_path.replace("middle_block.2" , "mid_block.resnets.1" )
if additional_replacements is not None:
for replacement in additional_replacements:
lowerCAmelCase : Any = new_path.replace(replacement["old"] , replacement["new"] )
# proj_attn.weight has to be converted from conv 1D to linear
if "proj_attn.weight" in new_path:
lowerCAmelCase : Optional[int] = old_checkpoint[path["old"]][:, :, 0]
else:
lowerCAmelCase : Optional[Any] = old_checkpoint[path["old"]]
def a__ ( SCREAMING_SNAKE_CASE : Optional[Any] , SCREAMING_SNAKE_CASE : List[str] ):
'''simple docstring'''
lowerCAmelCase : List[Any] = {}
lowerCAmelCase : List[Any] = checkpoint["time_embed.0.weight"]
lowerCAmelCase : Optional[Any] = checkpoint["time_embed.0.bias"]
lowerCAmelCase : Dict = checkpoint["time_embed.2.weight"]
lowerCAmelCase : Tuple = checkpoint["time_embed.2.bias"]
lowerCAmelCase : int = checkpoint["input_blocks.0.0.weight"]
lowerCAmelCase : List[Any] = checkpoint["input_blocks.0.0.bias"]
lowerCAmelCase : Dict = checkpoint["out.0.weight"]
lowerCAmelCase : Any = checkpoint["out.0.bias"]
lowerCAmelCase : List[str] = checkpoint["out.2.weight"]
lowerCAmelCase : str = checkpoint["out.2.bias"]
# Retrieves the keys for the input blocks only
lowerCAmelCase : Tuple = len({".".join(layer.split("." )[:2] ) for layer in checkpoint if "input_blocks" in layer} )
lowerCAmelCase : Any = {
layer_id: [key for key in checkpoint if f"""input_blocks.{layer_id}""" in key]
for layer_id in range(SCREAMING_SNAKE_CASE )
}
# Retrieves the keys for the middle blocks only
lowerCAmelCase : Dict = len({".".join(layer.split("." )[:2] ) for layer in checkpoint if "middle_block" in layer} )
lowerCAmelCase : Optional[Any] = {
layer_id: [key for key in checkpoint if f"""middle_block.{layer_id}""" in key]
for layer_id in range(SCREAMING_SNAKE_CASE )
}
# Retrieves the keys for the output blocks only
lowerCAmelCase : List[Any] = len({".".join(layer.split("." )[:2] ) for layer in checkpoint if "output_blocks" in layer} )
lowerCAmelCase : Dict = {
layer_id: [key for key in checkpoint if f"""output_blocks.{layer_id}""" in key]
for layer_id in range(SCREAMING_SNAKE_CASE )
}
for i in range(1 , SCREAMING_SNAKE_CASE ):
lowerCAmelCase : Any = (i - 1) // (config["num_res_blocks"] + 1)
lowerCAmelCase : Optional[Any] = (i - 1) % (config["num_res_blocks"] + 1)
lowerCAmelCase : Union[str, Any] = [key for key in input_blocks[i] if f"""input_blocks.{i}.0""" in key]
lowerCAmelCase : int = [key for key in input_blocks[i] if f"""input_blocks.{i}.1""" in key]
if f"""input_blocks.{i}.0.op.weight""" in checkpoint:
lowerCAmelCase : Any = checkpoint[
f"""input_blocks.{i}.0.op.weight"""
]
lowerCAmelCase : Optional[Any] = checkpoint[
f"""input_blocks.{i}.0.op.bias"""
]
continue
lowerCAmelCase : Any = renew_resnet_paths(SCREAMING_SNAKE_CASE )
lowerCAmelCase : Dict = {"old": f"""input_blocks.{i}.0""", "new": f"""down_blocks.{block_id}.resnets.{layer_in_block_id}"""}
lowerCAmelCase : List[str] = {"old": "resnets.2.op", "new": "downsamplers.0.op"}
assign_to_checkpoint(
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , additional_replacements=[meta_path, resnet_op] , config=SCREAMING_SNAKE_CASE )
if len(SCREAMING_SNAKE_CASE ):
lowerCAmelCase : List[Any] = renew_attention_paths(SCREAMING_SNAKE_CASE )
lowerCAmelCase : Optional[int] = {
"old": f"""input_blocks.{i}.1""",
"new": f"""down_blocks.{block_id}.attentions.{layer_in_block_id}""",
}
lowerCAmelCase : Any = {
f"""input_blocks.{i}.1.qkv.bias""": {
"key": f"""down_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias""",
"query": f"""down_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias""",
"value": f"""down_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias""",
},
f"""input_blocks.{i}.1.qkv.weight""": {
"key": f"""down_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight""",
"query": f"""down_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight""",
"value": f"""down_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight""",
},
}
assign_to_checkpoint(
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , additional_replacements=[meta_path] , attention_paths_to_split=SCREAMING_SNAKE_CASE , config=SCREAMING_SNAKE_CASE , )
lowerCAmelCase : str = middle_blocks[0]
lowerCAmelCase : Union[str, Any] = middle_blocks[1]
lowerCAmelCase : Optional[int] = middle_blocks[2]
lowerCAmelCase : Optional[Any] = renew_resnet_paths(SCREAMING_SNAKE_CASE )
assign_to_checkpoint(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , config=SCREAMING_SNAKE_CASE )
lowerCAmelCase : Union[str, Any] = renew_resnet_paths(SCREAMING_SNAKE_CASE )
assign_to_checkpoint(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , config=SCREAMING_SNAKE_CASE )
lowerCAmelCase : Optional[int] = renew_attention_paths(SCREAMING_SNAKE_CASE )
lowerCAmelCase : List[str] = {
"middle_block.1.qkv.bias": {
"key": "mid_block.attentions.0.key.bias",
"query": "mid_block.attentions.0.query.bias",
"value": "mid_block.attentions.0.value.bias",
},
"middle_block.1.qkv.weight": {
"key": "mid_block.attentions.0.key.weight",
"query": "mid_block.attentions.0.query.weight",
"value": "mid_block.attentions.0.value.weight",
},
}
assign_to_checkpoint(
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , attention_paths_to_split=SCREAMING_SNAKE_CASE , config=SCREAMING_SNAKE_CASE )
for i in range(SCREAMING_SNAKE_CASE ):
lowerCAmelCase : List[Any] = i // (config["num_res_blocks"] + 1)
lowerCAmelCase : Union[str, Any] = i % (config["num_res_blocks"] + 1)
lowerCAmelCase : Optional[Any] = [shave_segments(SCREAMING_SNAKE_CASE , 2 ) for name in output_blocks[i]]
lowerCAmelCase : List[Any] = {}
for layer in output_block_layers:
lowerCAmelCase , lowerCAmelCase : Optional[int] = layer.split("." )[0], shave_segments(SCREAMING_SNAKE_CASE , 1 )
if layer_id in output_block_list:
output_block_list[layer_id].append(SCREAMING_SNAKE_CASE )
else:
lowerCAmelCase : List[Any] = [layer_name]
if len(SCREAMING_SNAKE_CASE ) > 1:
lowerCAmelCase : Tuple = [key for key in output_blocks[i] if f"""output_blocks.{i}.0""" in key]
lowerCAmelCase : str = [key for key in output_blocks[i] if f"""output_blocks.{i}.1""" in key]
lowerCAmelCase : int = renew_resnet_paths(SCREAMING_SNAKE_CASE )
lowerCAmelCase : Dict = renew_resnet_paths(SCREAMING_SNAKE_CASE )
lowerCAmelCase : int = {"old": f"""output_blocks.{i}.0""", "new": f"""up_blocks.{block_id}.resnets.{layer_in_block_id}"""}
assign_to_checkpoint(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , additional_replacements=[meta_path] , config=SCREAMING_SNAKE_CASE )
if ["conv.weight", "conv.bias"] in output_block_list.values():
lowerCAmelCase : Tuple = list(output_block_list.values() ).index(["conv.weight", "conv.bias"] )
lowerCAmelCase : Union[str, Any] = checkpoint[
f"""output_blocks.{i}.{index}.conv.weight"""
]
lowerCAmelCase : Dict = checkpoint[
f"""output_blocks.{i}.{index}.conv.bias"""
]
# Clear attentions as they have been attributed above.
if len(SCREAMING_SNAKE_CASE ) == 2:
lowerCAmelCase : Optional[Any] = []
if len(SCREAMING_SNAKE_CASE ):
lowerCAmelCase : Any = renew_attention_paths(SCREAMING_SNAKE_CASE )
lowerCAmelCase : Tuple = {
"old": f"""output_blocks.{i}.1""",
"new": f"""up_blocks.{block_id}.attentions.{layer_in_block_id}""",
}
lowerCAmelCase : Optional[Any] = {
f"""output_blocks.{i}.1.qkv.bias""": {
"key": f"""up_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias""",
"query": f"""up_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias""",
"value": f"""up_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias""",
},
f"""output_blocks.{i}.1.qkv.weight""": {
"key": f"""up_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight""",
"query": f"""up_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight""",
"value": f"""up_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight""",
},
}
assign_to_checkpoint(
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , additional_replacements=[meta_path] , attention_paths_to_split=to_split if any("qkv" in key for key in attentions ) else None , config=SCREAMING_SNAKE_CASE , )
else:
lowerCAmelCase : int = renew_resnet_paths(SCREAMING_SNAKE_CASE , n_shave_prefix_segments=1 )
for path in resnet_0_paths:
lowerCAmelCase : Optional[Any] = ".".join(["output_blocks", str(SCREAMING_SNAKE_CASE ), path["old"]] )
lowerCAmelCase : Tuple = ".".join(["up_blocks", str(SCREAMING_SNAKE_CASE ), "resnets", str(SCREAMING_SNAKE_CASE ), path["new"]] )
lowerCAmelCase : Any = checkpoint[old_path]
return new_checkpoint
if __name__ == "__main__":
lowerCAmelCase__ = argparse.ArgumentParser()
parser.add_argument(
'''--checkpoint_path''', default=None, type=str, required=True, help='''Path to the checkpoint to convert.'''
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help='''The config json file corresponding to the architecture.''',
)
parser.add_argument('''--dump_path''', default=None, type=str, required=True, help='''Path to the output model.''')
lowerCAmelCase__ = parser.parse_args()
lowerCAmelCase__ = torch.load(args.checkpoint_path)
with open(args.config_file) as f:
lowerCAmelCase__ = json.loads(f.read())
lowerCAmelCase__ = convert_ldm_checkpoint(checkpoint, config)
if "ldm" in config:
del config["ldm"]
lowerCAmelCase__ = UNetaDModel(**config)
model.load_state_dict(converted_checkpoint)
try:
lowerCAmelCase__ = DDPMScheduler.from_config('''/'''.join(args.checkpoint_path.split('''/''')[:-1]))
lowerCAmelCase__ = VQModel.from_pretrained('''/'''.join(args.checkpoint_path.split('''/''')[:-1]))
lowerCAmelCase__ = LDMPipeline(unet=model, scheduler=scheduler, vae=vqvae)
pipe.save_pretrained(args.dump_path)
except: # noqa: E722
model.save_pretrained(args.dump_path)
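# Example invocation (added for illustration; the paths and script name are
# hypothetical, but the flags match the argparse definitions above):
#
#   python convert_ldm_checkpoint.py \
#       --checkpoint_path ./ldm/model.ckpt \
#       --config_file ./ldm/config.json \
#       --dump_path ./converted_model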
| 681 |
"""simple docstring"""
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized, parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv("TEST_SAGEMAKER" , "False" ) ) is not True , reason="Skipping test because should only be run when releasing minor transformers version" , )
@pytest.mark.usefixtures("sm_env" )
@parameterized_class(
[
{
"framework": "pytorch",
"script": "run_glue_model_parallelism.py",
"model_name_or_path": "roberta-large",
"instance_type": "ml.p3dn.24xlarge",
"results": {"train_runtime": 16_00, "eval_accuracy": 0.3, "eval_loss": 1.2},
},
{
"framework": "pytorch",
"script": "run_glue.py",
"model_name_or_path": "roberta-large",
"instance_type": "ml.p3dn.24xlarge",
"results": {"train_runtime": 16_00, "eval_accuracy": 0.3, "eval_loss": 1.2},
},
] )
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
"""simple docstring"""
def lowercase__ ( self ):
"""simple docstring"""
if self.framework == "pytorch":
subprocess.run(
f"""cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py""".split() , encoding="utf-8" , check=snake_case__ , )
assert hasattr(self , "env" )
def lowercase__ ( self , snake_case__ ):
"""simple docstring"""
lowerCAmelCase : Optional[Any] = {
"enabled": True,
"processes_per_host": 8,
}
lowerCAmelCase : List[Any] = {
"enabled": True,
"parameters": {
"microbatches": 4,
"placement_strategy": "spread",
"pipeline": "interleaved",
"optimize": "speed",
"partitions": 4,
"ddp": True,
},
}
lowerCAmelCase : List[Any] = {"smdistributed": {"modelparallel": smp_options}, "mpi": mpi_options}
lowerCAmelCase : Optional[Any] = "trainer" if self.script == "run_glue.py" else "smtrainer"
# creates estimator
return HuggingFace(
entry_point=self.script , source_dir=self.env.test_path , role=self.env.role , image_uri=self.env.image_uri , base_job_name=f"""{self.env.base_job_name}-{instance_count}-smp-{name_extension}""" , instance_count=snake_case__ , instance_type=self.instance_type , debugger_hook_config=snake_case__ , hyperparameters={
**self.env.hyperparameters,
"model_name_or_path": self.model_name_or_path,
"max_steps": 500,
} , metric_definitions=self.env.metric_definitions , distribution=snake_case__ , py_version="py36" , )
def lowercase__ ( self , snake_case__ ):
"""simple docstring"""
TrainingJobAnalytics(snake_case__ ).export_csv(f"""{self.env.test_path}/{job_name}_metrics.csv""" )
@parameterized.expand([(1,)] )
def lowercase__ ( self , snake_case__ ):
"""simple docstring"""
lowerCAmelCase : Dict = self.create_estimator(snake_case__ )
# run training
estimator.fit()
# result dataframe
lowerCAmelCase : Tuple = TrainingJobAnalytics(estimator.latest_training_job.name ).dataframe()
# extract kpis
lowerCAmelCase : Tuple = list(result_metrics_df[result_metrics_df.metric_name == "eval_accuracy"]["value"] )
lowerCAmelCase : Optional[int] = list(result_metrics_df[result_metrics_df.metric_name == "eval_loss"]["value"] )
# get train time from SageMaker job, this includes starting, preprocessing, stopping
lowerCAmelCase : Dict = (
Session().describe_training_job(estimator.latest_training_job.name ).get("TrainingTimeInSeconds" , 999_999 )
)
# assert kpis
assert train_runtime <= self.results["train_runtime"]
assert all(t >= self.results["eval_accuracy"] for t in eval_accuracy )
assert all(t <= self.results["eval_loss"] for t in eval_loss )
# dump tests result into json file to share in PR
with open(f"""{estimator.latest_training_job.name}.json""" , "w" ) as outfile:
json.dump({"train_time": train_runtime, "eval_accuracy": eval_accuracy, "eval_loss": eval_loss} , snake_case__ )
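# Run note (added for illustration): the whole module is gated by the
# pytest.mark.skipif above, so it only executes when the environment opts in,
# e.g. (exact test path hypothetical):
#
#   TEST_SAGEMAKER=True pytest tests/sagemaker/test_multi_node_model_parallel.py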
| 681 | 1 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_big_bird import BigBirdTokenizer
else:
lowerCAmelCase__ = None
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {'''vocab_file''': '''spiece.model''', '''tokenizer_file''': '''tokenizer.json'''}
lowerCAmelCase__ = {
'''vocab_file''': {
'''google/bigbird-roberta-base''': '''https://huggingface.co/google/bigbird-roberta-base/resolve/main/spiece.model''',
'''google/bigbird-roberta-large''': (
'''https://huggingface.co/google/bigbird-roberta-large/resolve/main/spiece.model'''
),
'''google/bigbird-base-trivia-itc''': (
'''https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/spiece.model'''
),
},
'''tokenizer_file''': {
'''google/bigbird-roberta-base''': (
'''https://huggingface.co/google/bigbird-roberta-base/resolve/main/tokenizer.json'''
),
'''google/bigbird-roberta-large''': (
'''https://huggingface.co/google/bigbird-roberta-large/resolve/main/tokenizer.json'''
),
'''google/bigbird-base-trivia-itc''': (
'''https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/tokenizer.json'''
),
},
}
lowerCAmelCase__ = {
'''google/bigbird-roberta-base''': 4_096,
'''google/bigbird-roberta-large''': 4_096,
'''google/bigbird-base-trivia-itc''': 4_096,
}
lowerCAmelCase__ = '''▁'''
class SCREAMING_SNAKE_CASE__ ( lowercase ):
"""simple docstring"""
a : Optional[Any] =VOCAB_FILES_NAMES
a : Union[str, Any] =PRETRAINED_VOCAB_FILES_MAP
a : Optional[Any] =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a : List[Any] =BigBirdTokenizer
a : Any =["input_ids", "attention_mask"]
a : List[int] =[]
def __init__( self , snake_case__=None , snake_case__=None , snake_case__="<unk>" , snake_case__="<s>" , snake_case__="</s>" , snake_case__="<pad>" , snake_case__="[SEP]" , snake_case__="[MASK]" , snake_case__="[CLS]" , **snake_case__ , ):
"""simple docstring"""
lowerCAmelCase : Tuple = AddedToken(snake_case__ , lstrip=snake_case__ , rstrip=snake_case__ ) if isinstance(snake_case__ , snake_case__ ) else bos_token
lowerCAmelCase : int = AddedToken(snake_case__ , lstrip=snake_case__ , rstrip=snake_case__ ) if isinstance(snake_case__ , snake_case__ ) else eos_token
lowerCAmelCase : Optional[int] = AddedToken(snake_case__ , lstrip=snake_case__ , rstrip=snake_case__ ) if isinstance(snake_case__ , snake_case__ ) else unk_token
lowerCAmelCase : str = AddedToken(snake_case__ , lstrip=snake_case__ , rstrip=snake_case__ ) if isinstance(snake_case__ , snake_case__ ) else pad_token
lowerCAmelCase : Optional[int] = AddedToken(snake_case__ , lstrip=snake_case__ , rstrip=snake_case__ ) if isinstance(snake_case__ , snake_case__ ) else cls_token
lowerCAmelCase : Tuple = AddedToken(snake_case__ , lstrip=snake_case__ , rstrip=snake_case__ ) if isinstance(snake_case__ , snake_case__ ) else sep_token
        # Mask token behaves like a normal word, i.e. includes the space before it
lowerCAmelCase : Tuple = AddedToken(snake_case__ , lstrip=snake_case__ , rstrip=snake_case__ ) if isinstance(snake_case__ , snake_case__ ) else mask_token
super().__init__(
snake_case__ , tokenizer_file=snake_case__ , bos_token=snake_case__ , eos_token=snake_case__ , unk_token=snake_case__ , sep_token=snake_case__ , pad_token=snake_case__ , cls_token=snake_case__ , mask_token=snake_case__ , **snake_case__ , )
lowerCAmelCase : int = vocab_file
lowerCAmelCase : Tuple = False if not self.vocab_file else True
def lowercase__ ( self , snake_case__ , snake_case__ = None ):
"""simple docstring"""
lowerCAmelCase : Tuple = [self.sep_token_id]
lowerCAmelCase : Union[str, Any] = [self.cls_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def lowercase__ ( self , snake_case__ , snake_case__ = None , snake_case__ = False ):
"""simple docstring"""
if already_has_special_tokens:
if token_ids_a is not None:
raise ValueError(
"You should not supply a second sequence if the provided sequence of "
"ids is already formatted with special tokens for the model." )
return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a]
if token_ids_a is None:
return [1] + ([0] * len(snake_case__ )) + [1]
return [1] + ([0] * len(snake_case__ )) + [1] + ([0] * len(snake_case__ )) + [1]
def lowercase__ ( self , snake_case__ , snake_case__ = None ):
"""simple docstring"""
lowerCAmelCase : str = [self.sep_token_id]
lowerCAmelCase : Any = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def lowercase__ ( self , snake_case__ , snake_case__ = None ):
"""simple docstring"""
if not self.can_save_slow_tokenizer:
raise ValueError(
"Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
"tokenizer." )
if not os.path.isdir(snake_case__ ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
lowerCAmelCase : List[Any] = os.path.join(
snake_case__ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(snake_case__ ):
copyfile(self.vocab_file , snake_case__ )
return (out_vocab_file,)
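# Hedged usage sketch (added for illustration; this file mirrors the upstream
# BigBirdTokenizerFast, so the public name below is assumed):
#
#   tok = BigBirdTokenizerFast.from_pretrained("google/bigbird-roberta-base")
#   enc = tok("Paris is the capital of France.")
#   # single sequences encode as [CLS] A [SEP]; pairs as [CLS] A [SEP] B [SEP],
#   # per build_inputs_with_special_tokens above, and token_type_ids follow
#   # create_token_type_ids_from_sequences (all 0s, then 1s for the pair).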
| 681 |
"""simple docstring"""
from math import factorial
def a__ ( SCREAMING_SNAKE_CASE : int = 1_0_0 ):
'''simple docstring'''
return sum(int(SCREAMING_SNAKE_CASE ) for x in str(factorial(SCREAMING_SNAKE_CASE ) ) )
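# Worked check (added for illustration): 10! = 3_628_800, whose digits sum
# to 3 + 6 + 2 + 8 + 8 + 0 + 0 = 27, so the function returns 27 for n = 10.
assert a__(1_0) == 2_7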
if __name__ == "__main__":
print(solution(int(input('''Enter the Number: ''').strip())))
| 681 | 1 |
"""simple docstring"""
from torch import nn
class SCREAMING_SNAKE_CASE__ ( nn.Module ):
"""simple docstring"""
def __init__( self , snake_case__ , snake_case__ ):
"""simple docstring"""
super().__init__()
lowerCAmelCase : str = class_size
lowerCAmelCase : Any = embed_size
# self.mlp1 = nn.Linear(embed_size, embed_size)
# self.mlp2 = (nn.Linear(embed_size, class_size))
lowerCAmelCase : str = nn.Linear(snake_case__ , snake_case__ )
def lowercase__ ( self , snake_case__ ):
"""simple docstring"""
lowerCAmelCase : Optional[int] = self.mlp(snake_case__ )
return logits
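# Hedged usage sketch (added for illustration; the head above is a single
# nn.Linear probe from embed_size to class_size, so un-obfuscated usage would
# look roughly like the following -- names hypothetical):
#
#   head = ClassificationHead(class_size=5, embed_size=768)
#   logits = head(torch.randn(4, 768))   # -> shape (4, 5)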
| 681 |
"""simple docstring"""
from typing import Any
class SCREAMING_SNAKE_CASE__ :
"""simple docstring"""
def __init__( self , snake_case__ ):
"""simple docstring"""
lowerCAmelCase : Dict = data
lowerCAmelCase : Any = None
def __repr__( self ):
"""simple docstring"""
return f"""Node({self.data})"""
class SCREAMING_SNAKE_CASE__ :
"""simple docstring"""
def __init__( self ):
"""simple docstring"""
lowerCAmelCase : Tuple = None
def __iter__( self ):
"""simple docstring"""
lowerCAmelCase : Any = self.head
while node:
yield node.data
lowerCAmelCase : Optional[int] = node.next
def __len__( self ):
"""simple docstring"""
return sum(1 for _ in self )
def __repr__( self ):
"""simple docstring"""
return "->".join([str(snake_case__ ) for item in self] )
def __getitem__( self , snake_case__ ):
"""simple docstring"""
if not 0 <= index < len(self ):
raise ValueError("list index out of range." )
for i, node in enumerate(self ):
if i == index:
return node
return None
def __setitem__( self , snake_case__ , snake_case__ ):
"""simple docstring"""
if not 0 <= index < len(self ):
raise ValueError("list index out of range." )
lowerCAmelCase : Union[str, Any] = self.head
for _ in range(snake_case__ ):
lowerCAmelCase : int = current.next
lowerCAmelCase : List[str] = data
def lowercase__ ( self , snake_case__ ):
"""simple docstring"""
self.insert_nth(len(self ) , snake_case__ )
def lowercase__ ( self , snake_case__ ):
"""simple docstring"""
self.insert_nth(0 , snake_case__ )
def lowercase__ ( self , snake_case__ , snake_case__ ):
"""simple docstring"""
if not 0 <= index <= len(self ):
raise IndexError("list index out of range" )
lowerCAmelCase : Optional[int] = Node(snake_case__ )
if self.head is None:
lowerCAmelCase : Any = new_node
elif index == 0:
lowerCAmelCase : Any = self.head # link new_node to head
lowerCAmelCase : Union[str, Any] = new_node
else:
lowerCAmelCase : List[str] = self.head
for _ in range(index - 1 ):
lowerCAmelCase : int = temp.next
lowerCAmelCase : int = temp.next
lowerCAmelCase : Dict = new_node
def lowercase__ ( self ): # print every node data
"""simple docstring"""
print(self )
def lowercase__ ( self ):
"""simple docstring"""
return self.delete_nth(0 )
def lowercase__ ( self ): # delete from tail
"""simple docstring"""
return self.delete_nth(len(self ) - 1 )
def lowercase__ ( self , snake_case__ = 0 ):
"""simple docstring"""
if not 0 <= index <= len(self ) - 1: # test if index is valid
raise IndexError("List index out of range." )
lowerCAmelCase : List[Any] = self.head # default first node
if index == 0:
lowerCAmelCase : Optional[int] = self.head.next
else:
lowerCAmelCase : List[str] = self.head
for _ in range(index - 1 ):
lowerCAmelCase : Union[str, Any] = temp.next
lowerCAmelCase : Optional[Any] = temp.next
lowerCAmelCase : Any = temp.next.next
return delete_node.data
def lowercase__ ( self ):
"""simple docstring"""
return self.head is None
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : str = None
lowerCAmelCase : Optional[int] = self.head
while current:
# Store the current node's next node.
lowerCAmelCase : List[Any] = current.next
# Make the current node's next point backwards
lowerCAmelCase : Dict = prev
# Make the previous node be the current node
lowerCAmelCase : List[str] = current
# Make the current node the next node (to progress iteration)
lowerCAmelCase : int = next_node
# Return prev in order to put the head at the end
lowerCAmelCase : Tuple = prev
def a__ ( ):
'''simple docstring'''
lowerCAmelCase : Tuple = LinkedList()
assert linked_list.is_empty() is True
assert str(SCREAMING_SNAKE_CASE ) == ""
try:
linked_list.delete_head()
raise AssertionError # This should not happen.
except IndexError:
assert True # This should happen.
try:
linked_list.delete_tail()
raise AssertionError # This should not happen.
except IndexError:
assert True # This should happen.
for i in range(1_0 ):
assert len(SCREAMING_SNAKE_CASE ) == i
linked_list.insert_nth(SCREAMING_SNAKE_CASE , i + 1 )
assert str(SCREAMING_SNAKE_CASE ) == "->".join(str(SCREAMING_SNAKE_CASE ) for i in range(1 , 1_1 ) )
linked_list.insert_head(0 )
linked_list.insert_tail(1_1 )
assert str(SCREAMING_SNAKE_CASE ) == "->".join(str(SCREAMING_SNAKE_CASE ) for i in range(0 , 1_2 ) )
assert linked_list.delete_head() == 0
assert linked_list.delete_nth(9 ) == 1_0
assert linked_list.delete_tail() == 1_1
assert len(SCREAMING_SNAKE_CASE ) == 9
assert str(SCREAMING_SNAKE_CASE ) == "->".join(str(SCREAMING_SNAKE_CASE ) for i in range(1 , 1_0 ) )
assert all(linked_list[i] == i + 1 for i in range(0 , 9 ) ) is True
for i in range(0 , 9 ):
lowerCAmelCase : Optional[Any] = -i
assert all(linked_list[i] == -i for i in range(0 , 9 ) ) is True
linked_list.reverse()
assert str(SCREAMING_SNAKE_CASE ) == "->".join(str(SCREAMING_SNAKE_CASE ) for i in range(-8 , 1 ) )
def a__ ( ):
'''simple docstring'''
lowerCAmelCase : List[str] = [
-9,
1_0_0,
Node(7_7_3_4_5_1_1_2 ),
"dlrow olleH",
7,
5_5_5_5,
0,
-192.55_555,
"Hello, world!",
77.9,
Node(1_0 ),
None,
None,
12.20,
]
lowerCAmelCase : List[str] = LinkedList()
for i in test_input:
linked_list.insert_tail(SCREAMING_SNAKE_CASE )
# Check if it's empty or not
assert linked_list.is_empty() is False
assert (
str(SCREAMING_SNAKE_CASE ) == "-9->100->Node(77345112)->dlrow olleH->7->5555->0->"
"-192.55555->Hello, world!->77.9->Node(10)->None->None->12.2"
)
# Delete the head
lowerCAmelCase : str = linked_list.delete_head()
assert result == -9
assert (
str(SCREAMING_SNAKE_CASE ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
"Hello, world!->77.9->Node(10)->None->None->12.2"
)
# Delete the tail
lowerCAmelCase : Union[str, Any] = linked_list.delete_tail()
assert result == 12.2
assert (
str(SCREAMING_SNAKE_CASE ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
"Hello, world!->77.9->Node(10)->None->None"
)
# Delete a node in specific location in linked list
lowerCAmelCase : List[str] = linked_list.delete_nth(1_0 )
assert result is None
assert (
str(SCREAMING_SNAKE_CASE ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
"Hello, world!->77.9->Node(10)->None"
)
# Add a Node instance to its head
linked_list.insert_head(Node("Hello again, world!" ) )
assert (
str(SCREAMING_SNAKE_CASE )
== "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
"7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None"
)
# Add None to its tail
linked_list.insert_tail(SCREAMING_SNAKE_CASE )
assert (
str(SCREAMING_SNAKE_CASE )
== "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
"7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None->None"
)
# Reverse the linked list
linked_list.reverse()
assert (
str(SCREAMING_SNAKE_CASE )
== "None->None->Node(10)->77.9->Hello, world!->-192.55555->0->5555->"
"7->dlrow olleH->Node(77345112)->100->Node(Hello again, world!)"
)
def a__ ( ):
'''simple docstring'''
from doctest import testmod
testmod()
lowerCAmelCase : Optional[Any] = LinkedList()
linked_list.insert_head(input("Inserting 1st at head " ).strip() )
linked_list.insert_head(input("Inserting 2nd at head " ).strip() )
print("\nPrint list:" )
linked_list.print_list()
linked_list.insert_tail(input("\nInserting 1st at tail " ).strip() )
linked_list.insert_tail(input("Inserting 2nd at tail " ).strip() )
print("\nPrint list:" )
linked_list.print_list()
print("\nDelete head" )
linked_list.delete_head()
print("Delete tail" )
linked_list.delete_tail()
print("\nPrint list:" )
linked_list.print_list()
print("\nReverse linked list" )
linked_list.reverse()
print("\nPrint list:" )
linked_list.print_list()
print("\nString representation of linked list:" )
print(SCREAMING_SNAKE_CASE )
print("\nReading/changing Node data using indexing:" )
print(f"""Element at Position 1: {linked_list[1]}""" )
lowerCAmelCase : Any = input("Enter New Value: " ).strip()
print("New list:" )
print(SCREAMING_SNAKE_CASE )
print(f"""length of linked_list is : {len(SCREAMING_SNAKE_CASE )}""" )
if __name__ == "__main__":
main()
| 681 | 1 |
"""simple docstring"""
def a__ ( SCREAMING_SNAKE_CASE : int ):
'''simple docstring'''
if num < 0:
return False
lowerCAmelCase : int = num
lowerCAmelCase : int = 0
while num > 0:
lowerCAmelCase : Dict = rev_num * 1_0 + (num % 1_0)
num //= 1_0
return num_copy == rev_num
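# Quick sanity checks (added for illustration): 1_2_3_2_1 reverses digit by
# digit to itself, 1_2_3 reverses to 3_2_1, and negative inputs are rejected
# by the guard at the top of the function.
assert a__(1_2_3_2_1)
assert not a__(1_2_3)
assert not a__(-1_2_1)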
if __name__ == "__main__":
import doctest
doctest.testmod()
| 681 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
lowerCAmelCase__ = {
'''configuration_efficientformer''': [
'''EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''EfficientFormerConfig''',
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = ['''EfficientFormerImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = [
'''EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''EfficientFormerForImageClassification''',
'''EfficientFormerForImageClassificationWithTeacher''',
'''EfficientFormerModel''',
'''EfficientFormerPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = [
'''TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFEfficientFormerForImageClassification''',
'''TFEfficientFormerForImageClassificationWithTeacher''',
'''TFEfficientFormerModel''',
'''TFEfficientFormerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_efficientformer import EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, EfficientFormerConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_efficientformer import EfficientFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_efficientformer import (
EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
EfficientFormerForImageClassification,
EfficientFormerForImageClassificationWithTeacher,
EfficientFormerModel,
EfficientFormerPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_efficientformer import (
TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerModel,
TFEfficientFormerPreTrainedModel,
)
else:
import sys
lowerCAmelCase__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
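# Usage note (added for illustration): with the _LazyModule above, importing
# the package is cheap and the heavy backends load only on first access, e.g.
#
#   from transformers import EfficientFormerConfig        # always available
#   from transformers import EfficientFormerModel         # requires torch
#   from transformers import TFEfficientFormerModel       # requires tensorflow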
| 681 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
lowerCAmelCase__ = {
'''configuration_tapas''': ['''TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''TapasConfig'''],
'''tokenization_tapas''': ['''TapasTokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = [
'''TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TapasForMaskedLM''',
'''TapasForQuestionAnswering''',
'''TapasForSequenceClassification''',
'''TapasModel''',
'''TapasPreTrainedModel''',
'''load_tf_weights_in_tapas''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = [
'''TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFTapasForMaskedLM''',
'''TFTapasForQuestionAnswering''',
'''TFTapasForSequenceClassification''',
'''TFTapasModel''',
'''TFTapasPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_tapas import TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP, TapasConfig
from .tokenization_tapas import TapasTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tapas import (
TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST,
TapasForMaskedLM,
TapasForQuestionAnswering,
TapasForSequenceClassification,
TapasModel,
TapasPreTrainedModel,
load_tf_weights_in_tapas,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_tapas import (
TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST,
TFTapasForMaskedLM,
TFTapasForQuestionAnswering,
TFTapasForSequenceClassification,
TFTapasModel,
TFTapasPreTrainedModel,
)
else:
import sys
lowerCAmelCase__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 681 |
"""simple docstring"""
import argparse
import csv
import logging
import os
import random
import numpy as np
import torch
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler, TensorDataset
from tqdm import tqdm, trange
from transformers import (
CONFIG_NAME,
WEIGHTS_NAME,
AdamW,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTTokenizer,
get_linear_schedule_with_warmup,
)
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''', datefmt='''%m/%d/%Y %H:%M:%S''', level=logging.INFO
)
lowerCAmelCase__ = logging.getLogger(__name__)
def a__ ( SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : Optional[int] ):
'''simple docstring'''
lowerCAmelCase : Optional[Any] = np.argmax(SCREAMING_SNAKE_CASE , axis=1 )
return np.sum(outputs == labels )
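# Worked check (added for illustration; comment-form because every helper in
# this file shares the obfuscated name a__): for logits [[0.1, 0.9], [0.8, 0.2]]
# np.argmax(..., axis=1) gives [1, 0], so against labels [1, 1] exactly one
# prediction matches and the accuracy count returned is 1.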
def a__ ( SCREAMING_SNAKE_CASE : int ):
'''simple docstring'''
with open(SCREAMING_SNAKE_CASE , encoding="utf_8" ) as f:
lowerCAmelCase : Tuple = csv.reader(SCREAMING_SNAKE_CASE )
lowerCAmelCase : Tuple = []
next(SCREAMING_SNAKE_CASE ) # skip the first line
for line in tqdm(SCREAMING_SNAKE_CASE ):
output.append((" ".join(line[1:5] ), line[5], line[6], int(line[-1] ) - 1) )
return output
def a__ ( SCREAMING_SNAKE_CASE : Optional[int] , SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : List[str] , SCREAMING_SNAKE_CASE : str ):
'''simple docstring'''
lowerCAmelCase : List[Any] = []
for dataset in encoded_datasets:
lowerCAmelCase : Dict = len(SCREAMING_SNAKE_CASE )
        # Two candidate endings per story: inputs have shape (n_batch, 2, input_len),
        # LM labels use -100 for ignored positions, and mc_labels pick the true ending.
        lowerCAmelCase : str = np.zeros((n_batch, 2, input_len) , dtype=np.intaa )
        lowerCAmelCase : Tuple = np.zeros((n_batch, 2) , dtype=np.intaa )
        lowerCAmelCase : int = np.full((n_batch, 2, input_len) , fill_value=-1_0_0 , dtype=np.intaa )
        lowerCAmelCase : List[Any] = np.zeros((n_batch,) , dtype=np.intaa )
        for i, (story, conta, conta, mc_label) in enumerate(SCREAMING_SNAKE_CASE ):
lowerCAmelCase : int = [start_token] + story[:cap_length] + [delimiter_token] + conta[:cap_length] + [clf_token]
lowerCAmelCase : Union[str, Any] = [start_token] + story[:cap_length] + [delimiter_token] + conta[:cap_length] + [clf_token]
lowerCAmelCase : Tuple = with_conta
lowerCAmelCase : Any = with_conta
lowerCAmelCase : Optional[Any] = len(SCREAMING_SNAKE_CASE ) - 1
lowerCAmelCase : Dict = len(SCREAMING_SNAKE_CASE ) - 1
lowerCAmelCase : Optional[Any] = with_conta
lowerCAmelCase : List[Any] = with_conta
lowerCAmelCase : str = mc_label
lowerCAmelCase : Dict = (input_ids, mc_token_ids, lm_labels, mc_labels)
tensor_datasets.append(tuple(torch.tensor(SCREAMING_SNAKE_CASE ) for t in all_inputs ) )
return tensor_datasets
def a__ ( ):
'''simple docstring'''
lowerCAmelCase : Optional[int] = argparse.ArgumentParser()
parser.add_argument("--model_name" , type=SCREAMING_SNAKE_CASE , default="openai-gpt" , help="pretrained model name" )
parser.add_argument("--do_train" , action="store_true" , help="Whether to run training." )
parser.add_argument("--do_eval" , action="store_true" , help="Whether to run eval on the dev set." )
parser.add_argument(
"--output_dir" , default=SCREAMING_SNAKE_CASE , type=SCREAMING_SNAKE_CASE , required=SCREAMING_SNAKE_CASE , help="The output directory where the model predictions and checkpoints will be written." , )
parser.add_argument("--train_dataset" , type=SCREAMING_SNAKE_CASE , default="" )
parser.add_argument("--eval_dataset" , type=SCREAMING_SNAKE_CASE , default="" )
parser.add_argument("--seed" , type=SCREAMING_SNAKE_CASE , default=4_2 )
parser.add_argument("--num_train_epochs" , type=SCREAMING_SNAKE_CASE , default=3 )
parser.add_argument("--train_batch_size" , type=SCREAMING_SNAKE_CASE , default=8 )
parser.add_argument("--eval_batch_size" , type=SCREAMING_SNAKE_CASE , default=1_6 )
parser.add_argument("--adam_epsilon" , default=1E-8 , type=SCREAMING_SNAKE_CASE , help="Epsilon for Adam optimizer." )
parser.add_argument("--max_grad_norm" , type=SCREAMING_SNAKE_CASE , default=1 )
parser.add_argument(
"--max_steps" , default=-1 , type=SCREAMING_SNAKE_CASE , help=(
"If > 0: set total number of training steps to perform. Override num_train_epochs."
) , )
parser.add_argument(
"--gradient_accumulation_steps" , type=SCREAMING_SNAKE_CASE , default=1 , help="Number of updates steps to accumulate before performing a backward/update pass." , )
parser.add_argument("--learning_rate" , type=SCREAMING_SNAKE_CASE , default=6.2_5E-5 )
parser.add_argument("--warmup_steps" , default=0 , type=SCREAMING_SNAKE_CASE , help="Linear warmup over warmup_steps." )
parser.add_argument("--lr_schedule" , type=SCREAMING_SNAKE_CASE , default="warmup_linear" )
parser.add_argument("--weight_decay" , type=SCREAMING_SNAKE_CASE , default=0.01 )
parser.add_argument("--lm_coef" , type=SCREAMING_SNAKE_CASE , default=0.9 )
parser.add_argument("--n_valid" , type=SCREAMING_SNAKE_CASE , default=3_7_4 )
parser.add_argument("--server_ip" , type=SCREAMING_SNAKE_CASE , default="" , help="Can be used for distant debugging." )
parser.add_argument("--server_port" , type=SCREAMING_SNAKE_CASE , default="" , help="Can be used for distant debugging." )
lowerCAmelCase : Tuple = parser.parse_args()
print(SCREAMING_SNAKE_CASE )
if args.server_ip and args.server_port:
# Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
import ptvsd
print("Waiting for debugger attach" )
ptvsd.enable_attach(address=(args.server_ip, args.server_port) , redirect_output=SCREAMING_SNAKE_CASE )
ptvsd.wait_for_attach()
random.seed(args.seed )
np.random.seed(args.seed )
torch.manual_seed(args.seed )
torch.cuda.manual_seed_all(args.seed )
lowerCAmelCase : Optional[int] = torch.device("cuda" if torch.cuda.is_available() else "cpu" )
lowerCAmelCase : Optional[int] = torch.cuda.device_count()
logger.info("device: {}, n_gpu {}".format(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) )
if not args.do_train and not args.do_eval:
raise ValueError("At least one of `do_train` or `do_eval` must be True." )
if not os.path.exists(args.output_dir ):
os.makedirs(args.output_dir )
# Load tokenizer and model
    # This loading function also adds new tokens and embeddings, called `special tokens`
# These new embeddings will be fine-tuned on the RocStories dataset
lowerCAmelCase : str = ["_start_", "_delimiter_", "_classify_"]
lowerCAmelCase : Dict = OpenAIGPTTokenizer.from_pretrained(args.model_name )
tokenizer.add_tokens(SCREAMING_SNAKE_CASE )
lowerCAmelCase : Dict = tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE )
lowerCAmelCase : Union[str, Any] = OpenAIGPTDoubleHeadsModel.from_pretrained(args.model_name )
model.resize_token_embeddings(len(SCREAMING_SNAKE_CASE ) )
model.to(SCREAMING_SNAKE_CASE )
# Load and encode the datasets
def tokenize_and_encode(SCREAMING_SNAKE_CASE : Optional[Any] ):
if isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
return tokenizer.convert_tokens_to_ids(tokenizer.tokenize(SCREAMING_SNAKE_CASE ) )
elif isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
return obj
return [tokenize_and_encode(SCREAMING_SNAKE_CASE ) for o in obj]
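    # tokenize_and_encode walks nested lists/tuples recursively: the first branch handles
    # raw strings (tokenize, then convert to ids) and the second returns already-encoded
    # values unchanged.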
logger.info("Encoding dataset..." )
lowerCAmelCase : Optional[Any] = load_rocstories_dataset(args.train_dataset )
lowerCAmelCase : int = load_rocstories_dataset(args.eval_dataset )
lowerCAmelCase : Tuple = (train_dataset, eval_dataset)
lowerCAmelCase : Dict = tokenize_and_encode(SCREAMING_SNAKE_CASE )
# Compute the max input length for the Transformer
lowerCAmelCase : Any = model.config.n_positions // 2 - 2
lowerCAmelCase : int = max(
len(story[:max_length] ) + max(len(conta[:max_length] ) , len(conta[:max_length] ) ) + 3
for dataset in encoded_datasets
for story, conta, conta, _ in dataset )
lowerCAmelCase : Union[str, Any] = min(SCREAMING_SNAKE_CASE , model.config.n_positions ) # Max size of input for the pre-trained model
# Prepare inputs tensors and dataloaders
lowerCAmelCase : Any = pre_process_datasets(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , *SCREAMING_SNAKE_CASE )
lowerCAmelCase , lowerCAmelCase : Tuple = tensor_datasets[0], tensor_datasets[1]
lowerCAmelCase : List[str] = TensorDataset(*SCREAMING_SNAKE_CASE )
lowerCAmelCase : Union[str, Any] = RandomSampler(SCREAMING_SNAKE_CASE )
lowerCAmelCase : Tuple = DataLoader(SCREAMING_SNAKE_CASE , sampler=SCREAMING_SNAKE_CASE , batch_size=args.train_batch_size )
lowerCAmelCase : int = TensorDataset(*SCREAMING_SNAKE_CASE )
lowerCAmelCase : Dict = SequentialSampler(SCREAMING_SNAKE_CASE )
lowerCAmelCase : Tuple = DataLoader(SCREAMING_SNAKE_CASE , sampler=SCREAMING_SNAKE_CASE , batch_size=args.eval_batch_size )
# Prepare optimizer
if args.do_train:
if args.max_steps > 0:
lowerCAmelCase : int = args.max_steps
lowerCAmelCase : str = args.max_steps // (len(SCREAMING_SNAKE_CASE ) // args.gradient_accumulation_steps) + 1
else:
lowerCAmelCase : Union[str, Any] = len(SCREAMING_SNAKE_CASE ) // args.gradient_accumulation_steps * args.num_train_epochs
lowerCAmelCase : Dict = list(model.named_parameters() )
lowerCAmelCase : str = ["bias", "LayerNorm.bias", "LayerNorm.weight"]
lowerCAmelCase : Tuple = [
{
"params": [p for n, p in param_optimizer if not any(nd in n for nd in no_decay )],
"weight_decay": args.weight_decay,
},
{"params": [p for n, p in param_optimizer if any(nd in n for nd in no_decay )], "weight_decay": 0.0},
]
lowerCAmelCase : Tuple = AdamW(SCREAMING_SNAKE_CASE , lr=args.learning_rate , eps=args.adam_epsilon )
lowerCAmelCase : str = get_linear_schedule_with_warmup(
SCREAMING_SNAKE_CASE , num_warmup_steps=args.warmup_steps , num_training_steps=SCREAMING_SNAKE_CASE )
if args.do_train:
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase : Dict = 0, 0, None
model.train()
for _ in trange(int(args.num_train_epochs ) , desc="Epoch" ):
lowerCAmelCase : Optional[int] = 0
lowerCAmelCase : Union[str, Any] = 0
lowerCAmelCase : Tuple = tqdm(SCREAMING_SNAKE_CASE , desc="Training" )
for step, batch in enumerate(SCREAMING_SNAKE_CASE ):
lowerCAmelCase : Tuple = tuple(t.to(SCREAMING_SNAKE_CASE ) for t in batch )
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase : int = batch
lowerCAmelCase : Optional[int] = model(SCREAMING_SNAKE_CASE , mc_token_ids=SCREAMING_SNAKE_CASE , lm_labels=SCREAMING_SNAKE_CASE , mc_labels=SCREAMING_SNAKE_CASE )
lowerCAmelCase : int = args.lm_coef * losses[0] + losses[1]
loss.backward()
optimizer.step()
scheduler.step()
optimizer.zero_grad()
tr_loss += loss.item()
lowerCAmelCase : Optional[Any] = (
loss.item() if exp_average_loss is None else 0.7 * exp_average_loss + 0.3 * loss.item()
)
nb_tr_steps += 1
lowerCAmelCase : int = "Training loss: {:.2e} lr: {:.2e}".format(SCREAMING_SNAKE_CASE , scheduler.get_lr()[0] )
# Save a trained model
if args.do_train:
# Save a trained model, configuration and tokenizer
lowerCAmelCase : Optional[int] = model.module if hasattr(SCREAMING_SNAKE_CASE , "module" ) else model # Only save the model itself
# If we save using the predefined names, we can load using `from_pretrained`
lowerCAmelCase : Any = os.path.join(args.output_dir , SCREAMING_SNAKE_CASE )
lowerCAmelCase : Dict = os.path.join(args.output_dir , SCREAMING_SNAKE_CASE )
torch.save(model_to_save.state_dict() , SCREAMING_SNAKE_CASE )
model_to_save.config.to_json_file(SCREAMING_SNAKE_CASE )
tokenizer.save_vocabulary(args.output_dir )
# Load a trained model and vocabulary that you have fine-tuned
lowerCAmelCase : List[str] = OpenAIGPTDoubleHeadsModel.from_pretrained(args.output_dir )
lowerCAmelCase : Dict = OpenAIGPTTokenizer.from_pretrained(args.output_dir )
model.to(SCREAMING_SNAKE_CASE )
if args.do_eval:
model.eval()
lowerCAmelCase , lowerCAmelCase : Optional[int] = 0, 0
lowerCAmelCase , lowerCAmelCase : Any = 0, 0
for batch in tqdm(SCREAMING_SNAKE_CASE , desc="Evaluating" ):
lowerCAmelCase : List[Any] = tuple(t.to(SCREAMING_SNAKE_CASE ) for t in batch )
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase : Tuple = batch
with torch.no_grad():
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase : Optional[Any] = model(
SCREAMING_SNAKE_CASE , mc_token_ids=SCREAMING_SNAKE_CASE , lm_labels=SCREAMING_SNAKE_CASE , mc_labels=SCREAMING_SNAKE_CASE )
lowerCAmelCase : List[str] = mc_logits.detach().cpu().numpy()
lowerCAmelCase : List[str] = mc_labels.to("cpu" ).numpy()
lowerCAmelCase : Any = accuracy(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
eval_loss += mc_loss.mean().item()
eval_accuracy += tmp_eval_accuracy
nb_eval_examples += input_ids.size(0 )
nb_eval_steps += 1
lowerCAmelCase : List[Any] = eval_loss / nb_eval_steps
lowerCAmelCase : List[Any] = eval_accuracy / nb_eval_examples
lowerCAmelCase : Tuple = tr_loss / nb_tr_steps if args.do_train else None
lowerCAmelCase : Any = {"eval_loss": eval_loss, "eval_accuracy": eval_accuracy, "train_loss": train_loss}
lowerCAmelCase : List[str] = os.path.join(args.output_dir , "eval_results.txt" )
with open(SCREAMING_SNAKE_CASE , "w" ) as writer:
logger.info("***** Eval results *****" )
for key in sorted(result.keys() ):
logger.info(" %s = %s" , SCREAMING_SNAKE_CASE , str(result[key] ) )
writer.write("%s = %s\n" % (key, str(result[key] )) )
if __name__ == "__main__":
main()
| 681 | 1 |
"""simple docstring"""
import unittest
from transformers.testing_utils import require_bsa
from transformers.utils import is_bsa_available
from ...test_feature_extraction_common import FeatureExtractionSavingTestMixin
if is_bsa_available():
from transformers import MarkupLMFeatureExtractor
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
"""simple docstring"""
def __init__( self , snake_case__ ):
"""simple docstring"""
lowerCAmelCase : Optional[int] = parent
def lowercase__ ( self ):
"""simple docstring"""
return {}
def a__ ( ):
'''simple docstring'''
lowerCAmelCase : int = "<HTML>\n\n <HEAD>\n <TITLE>sample document</TITLE>\n </HEAD>\n\n <BODY BGCOLOR=\"FFFFFF\">\n <HR>\n <a href=\"http://google.com\">Goog</a>\n <H1>This is one header</H1>\n <H2>This is a another Header</H2>\n <P>Travel from\n <P>\n <B>SFO to JFK</B>\n <BR>\n <B><I>on May 2, 2015 at 2:00 pm. For details go to confirm.com </I></B>\n <HR>\n <div style=\"color:#0000FF\">\n <h3>Traveler <b> name </b> is\n <p> John Doe </p>\n </div>"
lowerCAmelCase : Any = "\n <!DOCTYPE html>\n <html>\n <body>\n\n <h1>My First Heading</h1>\n <p>My first paragraph.</p>\n\n </body>\n </html>\n "
return [html_string_a, html_string_a]
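# get_html_strings returns one richly nested page and one minimal page, so the tests
# below exercise both the un-batched and the batched extraction paths.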
@require_bsa
class SCREAMING_SNAKE_CASE__ ( lowercase , unittest.TestCase ):
"""simple docstring"""
a : Optional[Any] =MarkupLMFeatureExtractor if is_bsa_available() else None
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : str = MarkupLMFeatureExtractionTester(self )
@property
def lowercase__ ( self ):
"""simple docstring"""
return self.feature_extract_tester.prepare_feat_extract_dict()
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : Optional[int] = self.feature_extraction_class()
# Test not batched input
lowerCAmelCase : Optional[int] = get_html_strings()[0]
lowerCAmelCase : Optional[Any] = feature_extractor(snake_case__ )
# fmt: off
lowerCAmelCase : Dict = [["sample document", "Goog", "This is one header", "This is a another Header", "Travel from", "SFO to JFK", "on May 2, 2015 at 2:00 pm. For details go to confirm.com", "Traveler", "name", "is", "John Doe"]]
lowerCAmelCase : Tuple = [["/html/head/title", "/html/body/a", "/html/body/h1", "/html/body/h2", "/html/body/p", "/html/body/p/p/b[1]", "/html/body/p/p/b[2]/i", "/html/body/p/p/div/h3", "/html/body/p/p/div/h3/b", "/html/body/p/p/div/h3", "/html/body/p/p/div/h3/p"]]
# fmt: on
self.assertEqual(encoding.nodes , snake_case__ )
self.assertEqual(encoding.xpaths , snake_case__ )
# Test batched
lowerCAmelCase : str = get_html_strings()
lowerCAmelCase : Any = feature_extractor(snake_case__ )
# fmt: off
lowerCAmelCase : Tuple = expected_nodes + [["My First Heading", "My first paragraph."]]
lowerCAmelCase : Optional[Any] = expected_xpaths + [["/html/body/h1", "/html/body/p"]]
self.assertEqual(len(encoding.nodes ) , 2 )
self.assertEqual(len(encoding.xpaths ) , 2 )
self.assertEqual(encoding.nodes , snake_case__ )
self.assertEqual(encoding.xpaths , snake_case__ )
| 681 |
"""simple docstring"""
from typing import List, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {
'''huggingface/informer-tourism-monthly''': (
'''https://huggingface.co/huggingface/informer-tourism-monthly/resolve/main/config.json'''
),
# See all Informer models at https://huggingface.co/models?filter=informer
}
class SCREAMING_SNAKE_CASE__ ( lowercase ):
"""simple docstring"""
a : Optional[Any] ="informer"
a : int ={
"hidden_size": "d_model",
"num_attention_heads": "encoder_attention_heads",
"num_hidden_layers": "encoder_layers",
}
def __init__( self , snake_case__ = None , snake_case__ = None , snake_case__ = "student_t" , snake_case__ = "nll" , snake_case__ = 1 , snake_case__ = None , snake_case__ = "mean" , snake_case__ = 0 , snake_case__ = 0 , snake_case__ = 0 , snake_case__ = 0 , snake_case__ = None , snake_case__ = None , snake_case__ = 64 , snake_case__ = 32 , snake_case__ = 32 , snake_case__ = 2 , snake_case__ = 2 , snake_case__ = 2 , snake_case__ = 2 , snake_case__ = True , snake_case__ = "gelu" , snake_case__ = 0.05 , snake_case__ = 0.1 , snake_case__ = 0.1 , snake_case__ = 0.1 , snake_case__ = 0.1 , snake_case__ = 100 , snake_case__ = 0.02 , snake_case__=True , snake_case__ = "prob" , snake_case__ = 5 , snake_case__ = True , **snake_case__ , ):
"""simple docstring"""
lowerCAmelCase : Union[str, Any] = prediction_length
lowerCAmelCase : Union[str, Any] = context_length or prediction_length
lowerCAmelCase : List[Any] = distribution_output
lowerCAmelCase : Optional[int] = loss
lowerCAmelCase : Optional[int] = input_size
lowerCAmelCase : str = num_time_features
lowerCAmelCase : Any = lags_sequence if lags_sequence is not None else [1, 2, 3, 4, 5, 6, 7]
lowerCAmelCase : Dict = scaling
lowerCAmelCase : List[str] = num_dynamic_real_features
lowerCAmelCase : Dict = num_static_real_features
lowerCAmelCase : Dict = num_static_categorical_features
# set cardinality
if cardinality and num_static_categorical_features > 0:
if len(snake_case__ ) != num_static_categorical_features:
raise ValueError(
"The cardinality should be a list of the same length as `num_static_categorical_features`" )
lowerCAmelCase : List[str] = cardinality
else:
lowerCAmelCase : Optional[Any] = [0]
# set embedding_dimension
if embedding_dimension and num_static_categorical_features > 0:
if len(snake_case__ ) != num_static_categorical_features:
raise ValueError(
"The embedding dimension should be a list of the same length as `num_static_categorical_features`" )
lowerCAmelCase : List[Any] = embedding_dimension
else:
lowerCAmelCase : Dict = [min(50 , (cat + 1) // 2 ) for cat in self.cardinality]
lowerCAmelCase : List[Any] = num_parallel_samples
# Transformer architecture configuration
lowerCAmelCase : Any = input_size * len(self.lags_sequence ) + self._number_of_features
lowerCAmelCase : str = d_model
lowerCAmelCase : List[str] = encoder_attention_heads
lowerCAmelCase : int = decoder_attention_heads
lowerCAmelCase : Optional[Any] = encoder_ffn_dim
lowerCAmelCase : Dict = decoder_ffn_dim
lowerCAmelCase : int = encoder_layers
lowerCAmelCase : Union[str, Any] = decoder_layers
lowerCAmelCase : Tuple = dropout
lowerCAmelCase : List[Any] = attention_dropout
lowerCAmelCase : int = activation_dropout
lowerCAmelCase : Union[str, Any] = encoder_layerdrop
lowerCAmelCase : int = decoder_layerdrop
lowerCAmelCase : Optional[int] = activation_function
lowerCAmelCase : int = init_std
lowerCAmelCase : Optional[Any] = use_cache
# Informer
lowerCAmelCase : Dict = attention_type
lowerCAmelCase : Any = sampling_factor
lowerCAmelCase : Optional[int] = distil
super().__init__(is_encoder_decoder=snake_case__ , **snake_case__ )
@property
def lowercase__ ( self ):
"""simple docstring"""
return (
sum(self.embedding_dimension )
+ self.num_dynamic_real_features
+ self.num_time_features
+ self.num_static_real_features
+ self.input_size * 2 # the log1p(abs(loc)) and log(scale) features
)
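    # Note (sketch): `feature_size`, set in __init__ as input_size * len(lags_sequence)
    # plus the count returned by this property, is the per-timestep input width fed to
    # the transformer; d_model defaults to 64 per the signature above.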
| 681 | 1 |
"""simple docstring"""
import argparse
import torch
from transformers import BertConfig, BertForPreTraining, load_tf_weights_in_bert
from transformers.utils import logging
logging.set_verbosity_info()
def a__ ( SCREAMING_SNAKE_CASE : Dict , SCREAMING_SNAKE_CASE : List[str] , SCREAMING_SNAKE_CASE : Union[str, Any] ):
'''simple docstring'''
lowerCAmelCase : str = BertConfig.from_json_file(SCREAMING_SNAKE_CASE )
print(f"""Building PyTorch model from configuration: {config}""" )
lowerCAmelCase : Optional[Any] = BertForPreTraining(SCREAMING_SNAKE_CASE )
# Load weights from tf checkpoint
load_tf_weights_in_bert(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
# Save pytorch-model
print(f"""Save PyTorch model to {pytorch_dump_path}""" )
torch.save(model.state_dict() , SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
lowerCAmelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--bert_config_file''',
default=None,
type=str,
required=True,
help=(
'''The config json file corresponding to the pre-trained BERT model. \n'''
'''This specifies the model architecture.'''
),
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
lowerCAmelCase__ = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
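    # Illustrative invocation (paths are placeholders):
    #   python convert_script.py --tf_checkpoint_path ./model.ckpt \
    #       --bert_config_file ./bert_config.json --pytorch_dump_path ./pytorch_model.bin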
| 681 |
"""simple docstring"""
def a__ ( SCREAMING_SNAKE_CASE : int ):
'''simple docstring'''
if num < 0:
return False
lowerCAmelCase : int = num
lowerCAmelCase : int = 0
while num > 0:
lowerCAmelCase : Dict = rev_num * 1_0 + (num % 1_0)
num //= 1_0
return num_copy == rev_num
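# Illustrative behaviour: 121 and 0 reverse to themselves (True); 123 does not (321 != 123),
# and any negative number is rejected up front (False).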
if __name__ == "__main__":
import doctest
doctest.testmod()
| 681 | 1 |
"""simple docstring"""
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class SCREAMING_SNAKE_CASE__ ( lowercase ):
"""simple docstring"""
@staticmethod
@abstractmethod
def lowercase__ ( snake_case__ ):
"""simple docstring"""
raise NotImplementedError()
@abstractmethod
def lowercase__ ( self ):
"""simple docstring"""
raise NotImplementedError()
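    # Subclasses implement the static registration hook above (which receives the
    # ArgumentParser to attach their sub-command to) and the run method; both raise
    # NotImplementedError here, so this ABC is never used directly.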
| 681 |
"""simple docstring"""
import os
import re
import shutil
import sys
import tempfile
import unittest
import black
lowerCAmelCase__ = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, '''utils'''))
import check_copies # noqa: E402
# This is the reference code that will be used in the tests.
# If DDPMSchedulerOutput is changed in scheduling_ddpm.py, this code needs to be manually updated.
lowerCAmelCase__ = ''' \"""
Output class for the scheduler\'s step function output.
Args:
prev_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):
Computed sample (x_{t-1}) of previous timestep. `prev_sample` should be used as next model input in the
denoising loop.
pred_original_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):
The predicted denoised sample (x_{0}) based on the model output from the current timestep.
`pred_original_sample` can be used to preview progress or for guidance.
\"""
prev_sample: torch.FloatTensor
pred_original_sample: Optional[torch.FloatTensor] = None
'''
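# The consistency checks below run candidate class bodies through black (119-column
# line length) before diffing them, so formatting-only differences are never flagged
# as copy inconsistencies.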
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
"""simple docstring"""
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : int = tempfile.mkdtemp()
os.makedirs(os.path.join(self.diffusers_dir , "schedulers/" ) )
lowerCAmelCase : List[str] = self.diffusers_dir
shutil.copy(
os.path.join(snake_case__ , "src/diffusers/schedulers/scheduling_ddpm.py" ) , os.path.join(self.diffusers_dir , "schedulers/scheduling_ddpm.py" ) , )
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : Dict = "src/diffusers"
shutil.rmtree(self.diffusers_dir )
def lowercase__ ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__=None ):
"""simple docstring"""
lowerCAmelCase : Union[str, Any] = comment + f"""\nclass {class_name}(nn.Module):\n""" + class_code
if overwrite_result is not None:
lowerCAmelCase : str = comment + f"""\nclass {class_name}(nn.Module):\n""" + overwrite_result
lowerCAmelCase : int = black.Mode(target_versions={black.TargetVersion.PYaa} , line_length=119 )
lowerCAmelCase : int = black.format_str(snake_case__ , mode=snake_case__ )
lowerCAmelCase : Dict = os.path.join(self.diffusers_dir , "new_code.py" )
with open(snake_case__ , "w" , newline="\n" ) as f:
f.write(snake_case__ )
if overwrite_result is None:
self.assertTrue(len(check_copies.is_copy_consistent(snake_case__ ) ) == 0 )
else:
check_copies.is_copy_consistent(f.name , overwrite=snake_case__ )
with open(snake_case__ , "r" ) as f:
self.assertTrue(f.read() , snake_case__ )
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : List[str] = check_copies.find_code_in_diffusers("schedulers.scheduling_ddpm.DDPMSchedulerOutput" )
self.assertEqual(snake_case__ , snake_case__ )
def lowercase__ ( self ):
"""simple docstring"""
self.check_copy_consistency(
"# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput" , "DDPMSchedulerOutput" , REFERENCE_CODE + "\n" , )
# With no empty line at the end
self.check_copy_consistency(
"# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput" , "DDPMSchedulerOutput" , snake_case__ , )
# Copy consistency with rename
self.check_copy_consistency(
"# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test" , "TestSchedulerOutput" , re.sub("DDPM" , "Test" , snake_case__ ) , )
# Copy consistency with a really long name
lowerCAmelCase : Union[str, Any] = "TestClassWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason"
self.check_copy_consistency(
f"""# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->{long_class_name}""" , f"""{long_class_name}SchedulerOutput""" , re.sub("Bert" , snake_case__ , snake_case__ ) , )
# Copy consistency with overwrite
self.check_copy_consistency(
"# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test" , "TestSchedulerOutput" , snake_case__ , overwrite_result=re.sub("DDPM" , "Test" , snake_case__ ) , )
| 681 | 1 |
"""simple docstring"""
import argparse
from tax import checkpoints
from transformers import AutoConfig, FlaxAutoModelForSeqaSeqLM
def a__ ( SCREAMING_SNAKE_CASE : List[Any] , SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : Dict ):
'''simple docstring'''
lowerCAmelCase : Optional[int] = AutoConfig.from_pretrained(SCREAMING_SNAKE_CASE )
lowerCAmelCase : Any = FlaxAutoModelForSeqaSeqLM.from_config(config=SCREAMING_SNAKE_CASE )
lowerCAmelCase : List[str] = checkpoints.load_tax_checkpoint(SCREAMING_SNAKE_CASE )
lowerCAmelCase : str = "wi_0" in tax_model["target"]["encoder"]["layers_0"]["mlp"]
if config.model_type == "t5":
lowerCAmelCase : Optional[Any] = "SelfAttention"
if config.model_type == "longt5" and config.encoder_attention_type == "local":
lowerCAmelCase : Tuple = "LocalSelfAttention"
elif config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
lowerCAmelCase : List[str] = "TransientGlobalSelfAttention"
else:
        raise ValueError(
            "Given config is expected to have `model_type='t5'`, or `model_type='longt5'` with `encoder_attention_type`"
            " attribute with a value from ['local', 'transient-global']." )
# Encoder
for layer_index in range(config.num_layers ):
lowerCAmelCase : Tuple = f"""layers_{str(SCREAMING_SNAKE_CASE )}"""
# Self-Attention
lowerCAmelCase : Optional[int] = tax_model["target"]["encoder"][layer_name]["attention"]["key"]["kernel"]
lowerCAmelCase : List[str] = tax_model["target"]["encoder"][layer_name]["attention"]["out"]["kernel"]
lowerCAmelCase : List[Any] = tax_model["target"]["encoder"][layer_name]["attention"]["query"]["kernel"]
lowerCAmelCase : int = tax_model["target"]["encoder"][layer_name]["attention"]["value"]["kernel"]
# Global input layer norm
if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
lowerCAmelCase : int = tax_model["target"]["encoder"][layer_name]["attention"]["T5LayerNorm_0"]["scale"]
# Layer Normalization
lowerCAmelCase : Dict = tax_model["target"]["encoder"][layer_name]["pre_attention_layer_norm"]["scale"]
if split_mlp_wi:
lowerCAmelCase : Dict = tax_model["target"]["encoder"][layer_name]["mlp"]["wi_0"]["kernel"]
lowerCAmelCase : Optional[int] = tax_model["target"]["encoder"][layer_name]["mlp"]["wi_1"]["kernel"]
else:
lowerCAmelCase : List[str] = tax_model["target"]["encoder"][layer_name]["mlp"]["wi"]["kernel"]
lowerCAmelCase : List[str] = tax_model["target"]["encoder"][layer_name]["mlp"]["wo"]["kernel"]
# Layer Normalization
lowerCAmelCase : Any = tax_model["target"]["encoder"][layer_name]["pre_mlp_layer_norm"]["scale"]
# Assigning
lowerCAmelCase : Optional[int] = flax_model.params["encoder"]["block"][str(SCREAMING_SNAKE_CASE )]["layer"]
lowerCAmelCase : Tuple = tax_attention_key
lowerCAmelCase : List[Any] = tax_attention_out
lowerCAmelCase : Dict = tax_attention_query
lowerCAmelCase : List[str] = tax_attention_value
lowerCAmelCase : Union[str, Any] = tax_attention_layer_norm
# Global input layer norm
if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
lowerCAmelCase : int = tax_global_layer_norm
if split_mlp_wi:
lowerCAmelCase : Union[str, Any] = tax_mlp_wi_a
lowerCAmelCase : Tuple = tax_mlp_wi_a
else:
lowerCAmelCase : Union[str, Any] = tax_mlp_wi
lowerCAmelCase : Optional[Any] = tax_mlp_wo
lowerCAmelCase : int = tax_mlp_layer_norm
lowerCAmelCase : List[str] = flax_model_encoder_layer_block
# Only for layer 0:
lowerCAmelCase : List[Any] = tax_model["target"]["encoder"]["relpos_bias"]["rel_embedding"].T
lowerCAmelCase : Any = tax_encoder_rel_embedding
# Side/global relative position_bias + layer norm
if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
lowerCAmelCase : Optional[int] = tax_model["target"]["encoder"]["side_relpos_bias"]["rel_embedding"].T
lowerCAmelCase : Optional[Any] = tax_encoder_global_rel_embedding
# Assigning
lowerCAmelCase : str = tax_model["target"]["encoder"]["encoder_norm"]["scale"]
lowerCAmelCase : int = tax_encoder_norm
# Decoder
for layer_index in range(config.num_layers ):
lowerCAmelCase : Optional[int] = f"""layers_{str(SCREAMING_SNAKE_CASE )}"""
# Self-Attention
lowerCAmelCase : Optional[int] = tax_model["target"]["decoder"][layer_name]["self_attention"]["key"]["kernel"]
lowerCAmelCase : Union[str, Any] = tax_model["target"]["decoder"][layer_name]["self_attention"]["out"]["kernel"]
lowerCAmelCase : List[str] = tax_model["target"]["decoder"][layer_name]["self_attention"]["query"]["kernel"]
lowerCAmelCase : Union[str, Any] = tax_model["target"]["decoder"][layer_name]["self_attention"]["value"]["kernel"]
# Layer Normalization
lowerCAmelCase : Dict = tax_model["target"]["decoder"][layer_name]["pre_self_attention_layer_norm"][
"scale"
]
# Encoder-Decoder-Attention
lowerCAmelCase : Any = tax_model["target"]["decoder"][layer_name]["encoder_decoder_attention"]
lowerCAmelCase : Optional[Any] = tax_enc_dec_attention_module["key"]["kernel"]
lowerCAmelCase : str = tax_enc_dec_attention_module["out"]["kernel"]
lowerCAmelCase : List[str] = tax_enc_dec_attention_module["query"]["kernel"]
lowerCAmelCase : str = tax_enc_dec_attention_module["value"]["kernel"]
# Layer Normalization
lowerCAmelCase : Tuple = tax_model["target"]["decoder"][layer_name]["pre_cross_attention_layer_norm"]["scale"]
# MLP
if split_mlp_wi:
lowerCAmelCase : Optional[Any] = tax_model["target"]["decoder"][layer_name]["mlp"]["wi_0"]["kernel"]
lowerCAmelCase : Tuple = tax_model["target"]["decoder"][layer_name]["mlp"]["wi_1"]["kernel"]
else:
lowerCAmelCase : str = tax_model["target"]["decoder"][layer_name]["mlp"]["wi"]["kernel"]
lowerCAmelCase : int = tax_model["target"]["decoder"][layer_name]["mlp"]["wo"]["kernel"]
# Layer Normalization
lowerCAmelCase : Dict = tax_model["target"]["decoder"][layer_name]["pre_mlp_layer_norm"]["scale"]
# Assigning
lowerCAmelCase : List[str] = flax_model.params["decoder"]["block"][str(SCREAMING_SNAKE_CASE )]["layer"]
lowerCAmelCase : str = tax_attention_key
lowerCAmelCase : Any = tax_attention_out
lowerCAmelCase : str = tax_attention_query
lowerCAmelCase : Optional[Any] = tax_attention_value
lowerCAmelCase : Union[str, Any] = tax_pre_attention_layer_norm
lowerCAmelCase : List[Any] = tax_enc_dec_attention_key
lowerCAmelCase : Union[str, Any] = tax_enc_dec_attention_out
lowerCAmelCase : int = tax_enc_dec_attention_query
lowerCAmelCase : Optional[int] = tax_enc_dec_attention_value
lowerCAmelCase : Dict = tax_cross_layer_norm
if split_mlp_wi:
lowerCAmelCase : Optional[int] = tax_mlp_wi_a
lowerCAmelCase : Optional[Any] = tax_mlp_wi_a
else:
lowerCAmelCase : Optional[Any] = tax_mlp_wi
lowerCAmelCase : Dict = tax_mlp_wo
lowerCAmelCase : Optional[Any] = txa_mlp_layer_norm
lowerCAmelCase : int = flax_model_decoder_layer_block
# Decoder Normalization
lowerCAmelCase : str = tax_model["target"]["decoder"]["decoder_norm"]["scale"]
lowerCAmelCase : Dict = txa_decoder_norm
# Only for layer 0:
lowerCAmelCase : Union[str, Any] = tax_model["target"]["decoder"]["relpos_bias"]["rel_embedding"].T
lowerCAmelCase : str = tax_decoder_rel_embedding
# Token Embeddings
lowerCAmelCase : Tuple = tax_model["target"]["token_embedder"]["embedding"]
lowerCAmelCase : Optional[int] = txa_token_embeddings
# LM Head (only in v1.1 and LongT5 checkpoints)
if "logits_dense" in tax_model["target"]["decoder"]:
lowerCAmelCase : Optional[Any] = tax_model["target"]["decoder"]["logits_dense"]["kernel"]
flax_model.save_pretrained(SCREAMING_SNAKE_CASE )
print("T5X Model was sucessfully converted!" )
if __name__ == "__main__":
lowerCAmelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--t5x_checkpoint_path''', default=None, type=str, required=True, help='''Path the T5X checkpoint.'''
)
parser.add_argument('''--config_name''', default=None, type=str, required=True, help='''Config name of LongT5/T5 model.''')
parser.add_argument(
'''--flax_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output FLAX model.'''
)
lowerCAmelCase__ = parser.parse_args()
convert_tax_checkpoint_to_flax(args.tax_checkpoint_path, args.config_name, args.flax_dump_folder_path)
| 681 |
"""simple docstring"""
import re
from flax.core.frozen_dict import freeze
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.experimental import PartitionSpec as P
# Sentinels
lowerCAmelCase__ = object()
# For specifying empty leaf dict `{}`
lowerCAmelCase__ = object()
def a__ ( SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : Optional[Any] ):
'''simple docstring'''
lowerCAmelCase : Optional[int] = tuple((re.compile(x + "$" ) for x in qs) )
for i in range(len(SCREAMING_SNAKE_CASE ) - len(SCREAMING_SNAKE_CASE ) + 1 ):
lowerCAmelCase : int = [x.match(SCREAMING_SNAKE_CASE ) for x, y in zip(SCREAMING_SNAKE_CASE , ks[i:] )]
if matches and all(SCREAMING_SNAKE_CASE ):
return True
return False
def a__ ( SCREAMING_SNAKE_CASE : int ):
'''simple docstring'''
def replace(SCREAMING_SNAKE_CASE : Tuple , SCREAMING_SNAKE_CASE : Dict ):
for rule, replacement in rules:
if _match(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
return replacement
return val
return replace
def a__ ( ):
'''simple docstring'''
return [
# embeddings
(("transformer", "wpe", "embedding"), P("mp" , SCREAMING_SNAKE_CASE )),
(("transformer", "wte", "embedding"), P("mp" , SCREAMING_SNAKE_CASE )),
        # attention
(("attention", "(q_proj|k_proj|v_proj)", "kernel"), P(SCREAMING_SNAKE_CASE , "mp" )),
(("attention", "out_proj", "kernel"), P("mp" , SCREAMING_SNAKE_CASE )),
(("attention", "out_proj", "bias"), None),
# mlp
(("mlp", "c_fc", "kernel"), P(SCREAMING_SNAKE_CASE , "mp" )),
(("mlp", "c_fc", "bias"), P("mp" )),
(("mlp", "c_proj", "kernel"), P("mp" , SCREAMING_SNAKE_CASE )),
(("mlp", "c_proj", "bias"), None),
# layer norms
((r"ln_\d+", "bias"), None),
((r"\d+", r"ln_\d+", "scale"), None),
(("ln_f", "bias"), None),
(("ln_f", "scale"), None),
]
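# Megatron-style sharding sketch encoded by the rules above: q/k/v kernels are split on
# their output dimension across the "mp" mesh axis (column parallel); out_proj and
# mlp.c_proj kernels are split on their input dimension (row parallel). Layer norms and
# the output-projection biases stay replicated (None), while the column-parallel
# c_fc bias is itself sharded along "mp".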
def a__ ( SCREAMING_SNAKE_CASE : Union[str, Any] ):
'''simple docstring'''
lowerCAmelCase : Any = _get_partition_rules()
lowerCAmelCase : Tuple = _replacement_rules(SCREAMING_SNAKE_CASE )
lowerCAmelCase : List[str] = {k: _unmatched for k in flatten_dict(SCREAMING_SNAKE_CASE )}
lowerCAmelCase : List[Any] = {k: replace(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) for k, v in initd.items()}
assert _unmatched not in result.values(), "Incomplete partition spec."
return freeze(unflatten_dict(SCREAMING_SNAKE_CASE ) )
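# Usage sketch (the parameter tree is hypothetical): calling the function above on a
# Flax parameter dict returns a frozen tree of PartitionSpecs with identical structure;
# the assert guarantees every parameter matched at least one rule.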
| 681 | 1 |
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ChineseCLIPImageProcessor
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
"""simple docstring"""
def __init__( self , snake_case__ , snake_case__=7 , snake_case__=3 , snake_case__=18 , snake_case__=30 , snake_case__=400 , snake_case__=True , snake_case__=None , snake_case__=True , snake_case__=None , snake_case__=True , snake_case__=[0.48145466, 0.4578275, 0.40821073] , snake_case__=[0.26862954, 0.26130258, 0.27577711] , snake_case__=True , ):
"""simple docstring"""
lowerCAmelCase : int = size if size is not None else {"height": 224, "width": 224}
lowerCAmelCase : Dict = crop_size if crop_size is not None else {"height": 18, "width": 18}
lowerCAmelCase : str = parent
lowerCAmelCase : str = batch_size
lowerCAmelCase : List[Any] = num_channels
lowerCAmelCase : int = image_size
lowerCAmelCase : Tuple = min_resolution
lowerCAmelCase : str = max_resolution
lowerCAmelCase : Any = do_resize
lowerCAmelCase : Optional[int] = size
lowerCAmelCase : int = do_center_crop
lowerCAmelCase : Tuple = crop_size
lowerCAmelCase : Optional[int] = do_normalize
lowerCAmelCase : Dict = image_mean
lowerCAmelCase : Dict = image_std
lowerCAmelCase : List[str] = do_convert_rgb
def lowercase__ ( self ):
"""simple docstring"""
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_convert_rgb": self.do_convert_rgb,
}
def lowercase__ ( self , snake_case__=False , snake_case__=False , snake_case__=False ):
"""simple docstring"""
assert not (numpify and torchify), "You cannot specify both numpy and PyTorch tensors at the same time"
if equal_resolution:
lowerCAmelCase : Optional[Any] = []
for i in range(self.batch_size ):
image_inputs.append(
np.random.randint(
255 , size=(self.num_channels, self.max_resolution, self.max_resolution) , dtype=np.uinta ) )
else:
lowerCAmelCase : List[str] = []
for i in range(self.batch_size ):
lowerCAmelCase , lowerCAmelCase : Union[str, Any] = np.random.choice(np.arange(self.min_resolution , self.max_resolution ) , 2 )
image_inputs.append(np.random.randint(255 , size=(self.num_channels, width, height) , dtype=np.uinta ) )
if not numpify and not torchify:
# PIL expects the channel dimension as last dimension
lowerCAmelCase : Optional[int] = [Image.fromarray(np.moveaxis(snake_case__ , 0 , -1 ) ) for x in image_inputs]
if torchify:
lowerCAmelCase : int = [torch.from_numpy(snake_case__ ) for x in image_inputs]
return image_inputs
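    # prepare_inputs yields PIL images by default; numpify=True or torchify=True (never
    # both) switches to numpy arrays / torch tensors, which stay channels-first, while
    # PIL images get the channel axis moved last.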
@require_torch
@require_vision
class SCREAMING_SNAKE_CASE__ ( lowercase , unittest.TestCase ):
"""simple docstring"""
a : Dict =ChineseCLIPImageProcessor if is_vision_available() else None
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : Tuple = ChineseCLIPImageProcessingTester(self , do_center_crop=snake_case__ )
@property
def lowercase__ ( self ):
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : List[str] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(snake_case__ , "do_resize" ) )
self.assertTrue(hasattr(snake_case__ , "size" ) )
self.assertTrue(hasattr(snake_case__ , "do_center_crop" ) )
self.assertTrue(hasattr(snake_case__ , "center_crop" ) )
self.assertTrue(hasattr(snake_case__ , "do_normalize" ) )
self.assertTrue(hasattr(snake_case__ , "image_mean" ) )
self.assertTrue(hasattr(snake_case__ , "image_std" ) )
self.assertTrue(hasattr(snake_case__ , "do_convert_rgb" ) )
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : Dict = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"height": 224, "width": 224} )
self.assertEqual(image_processor.crop_size , {"height": 18, "width": 18} )
lowerCAmelCase : List[Any] = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 )
self.assertEqual(image_processor.size , {"shortest_edge": 42} )
self.assertEqual(image_processor.crop_size , {"height": 84, "width": 84} )
def lowercase__ ( self ):
"""simple docstring"""
pass
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : Optional[Any] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
lowerCAmelCase : List[str] = self.image_processor_tester.prepare_inputs(equal_resolution=snake_case__ )
for image in image_inputs:
self.assertIsInstance(snake_case__ , Image.Image )
# Test not batched input
lowerCAmelCase : List[str] = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
lowerCAmelCase : Optional[int] = image_processing(snake_case__ , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : Tuple = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
lowerCAmelCase : Dict = self.image_processor_tester.prepare_inputs(equal_resolution=snake_case__ , numpify=snake_case__ )
for image in image_inputs:
self.assertIsInstance(snake_case__ , np.ndarray )
# Test not batched input
lowerCAmelCase : str = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
lowerCAmelCase : List[str] = image_processing(snake_case__ , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : List[Any] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
lowerCAmelCase : Any = self.image_processor_tester.prepare_inputs(equal_resolution=snake_case__ , torchify=snake_case__ )
for image in image_inputs:
self.assertIsInstance(snake_case__ , torch.Tensor )
# Test not batched input
lowerCAmelCase : int = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
lowerCAmelCase : List[Any] = image_processing(snake_case__ , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
@require_torch
@require_vision
class SCREAMING_SNAKE_CASE__ ( lowercase , unittest.TestCase ):
"""simple docstring"""
a : Optional[Any] =ChineseCLIPImageProcessor if is_vision_available() else None
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : Tuple = ChineseCLIPImageProcessingTester(self , num_channels=4 , do_center_crop=snake_case__ )
lowerCAmelCase : str = 3
@property
def lowercase__ ( self ):
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : Dict = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(snake_case__ , "do_resize" ) )
self.assertTrue(hasattr(snake_case__ , "size" ) )
self.assertTrue(hasattr(snake_case__ , "do_center_crop" ) )
self.assertTrue(hasattr(snake_case__ , "center_crop" ) )
self.assertTrue(hasattr(snake_case__ , "do_normalize" ) )
self.assertTrue(hasattr(snake_case__ , "image_mean" ) )
self.assertTrue(hasattr(snake_case__ , "image_std" ) )
self.assertTrue(hasattr(snake_case__ , "do_convert_rgb" ) )
def lowercase__ ( self ):
"""simple docstring"""
pass
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : str = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
lowerCAmelCase : Dict = self.image_processor_tester.prepare_inputs(equal_resolution=snake_case__ )
for image in image_inputs:
self.assertIsInstance(snake_case__ , Image.Image )
# Test not batched input
lowerCAmelCase : str = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.expected_encoded_image_num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
lowerCAmelCase : Dict = image_processing(snake_case__ , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.expected_encoded_image_num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
| 681 |
"""simple docstring"""
def a__ ( SCREAMING_SNAKE_CASE : int = 1_0 , SCREAMING_SNAKE_CASE : int = 2_2 ):
'''simple docstring'''
lowerCAmelCase : Dict = range(1 , SCREAMING_SNAKE_CASE )
lowerCAmelCase : List[str] = range(1 , SCREAMING_SNAKE_CASE )
return sum(
1 for power in powers for base in bases if len(str(base**power ) ) == power )
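# Example: 16807 = 7**5 has exactly 5 digits, so (base=7, power=5) is counted; any
# base >= 10 can never qualify, since 10**n already has n + 1 digits.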
if __name__ == "__main__":
print(F"{solution(10, 22) = }")
| 681 | 1 |
"""simple docstring"""
import inspect
import unittest
import warnings
from transformers import DeiTConfig
from transformers.models.auto import get_values
from transformers.testing_utils import (
require_accelerate,
require_torch,
require_torch_gpu,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
MODEL_MAPPING,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
DeiTModel,
)
from transformers.models.deit.modeling_deit import DEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DeiTImageProcessor
class SCREAMING_SNAKE_CASE__ :
"""simple docstring"""
def __init__( self , snake_case__ , snake_case__=13 , snake_case__=30 , snake_case__=2 , snake_case__=3 , snake_case__=True , snake_case__=True , snake_case__=32 , snake_case__=5 , snake_case__=4 , snake_case__=37 , snake_case__="gelu" , snake_case__=0.1 , snake_case__=0.1 , snake_case__=10 , snake_case__=0.02 , snake_case__=3 , snake_case__=None , snake_case__=2 , ):
"""simple docstring"""
lowerCAmelCase : Union[str, Any] = parent
lowerCAmelCase : List[str] = batch_size
lowerCAmelCase : List[Any] = image_size
lowerCAmelCase : Optional[int] = patch_size
lowerCAmelCase : Optional[Any] = num_channels
lowerCAmelCase : Any = is_training
lowerCAmelCase : str = use_labels
lowerCAmelCase : int = hidden_size
lowerCAmelCase : List[str] = num_hidden_layers
lowerCAmelCase : Union[str, Any] = num_attention_heads
lowerCAmelCase : List[str] = intermediate_size
lowerCAmelCase : Any = hidden_act
lowerCAmelCase : Tuple = hidden_dropout_prob
lowerCAmelCase : int = attention_probs_dropout_prob
lowerCAmelCase : int = type_sequence_label_size
lowerCAmelCase : Union[str, Any] = initializer_range
lowerCAmelCase : Tuple = scope
lowerCAmelCase : str = encoder_stride
        # in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
lowerCAmelCase : Any = (image_size // patch_size) ** 2
lowerCAmelCase : Tuple = num_patches + 2
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : Tuple = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowerCAmelCase : List[Any] = None
if self.use_labels:
lowerCAmelCase : List[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCAmelCase : Any = self.get_config()
return config, pixel_values, labels
def lowercase__ ( self ):
"""simple docstring"""
return DeiTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=snake_case__ , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , )
def lowercase__ ( self , snake_case__ , snake_case__ , snake_case__ ):
"""simple docstring"""
lowerCAmelCase : List[str] = DeiTModel(config=snake_case__ )
model.to(snake_case__ )
model.eval()
lowerCAmelCase : Optional[Any] = model(snake_case__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowercase__ ( self , snake_case__ , snake_case__ , snake_case__ ):
"""simple docstring"""
lowerCAmelCase : Union[str, Any] = DeiTForMaskedImageModeling(config=snake_case__ )
model.to(snake_case__ )
model.eval()
lowerCAmelCase : Tuple = model(snake_case__ )
self.parent.assertEqual(
result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
lowerCAmelCase : Dict = 1
lowerCAmelCase : List[str] = DeiTForMaskedImageModeling(snake_case__ )
model.to(snake_case__ )
model.eval()
lowerCAmelCase : Dict = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
lowerCAmelCase : str = model(snake_case__ )
self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) )
def lowercase__ ( self , snake_case__ , snake_case__ , snake_case__ ):
"""simple docstring"""
lowerCAmelCase : int = self.type_sequence_label_size
lowerCAmelCase : Optional[Any] = DeiTForImageClassification(snake_case__ )
model.to(snake_case__ )
model.eval()
lowerCAmelCase : Optional[int] = model(snake_case__ , labels=snake_case__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
lowerCAmelCase : Tuple = 1
lowerCAmelCase : Optional[Any] = DeiTForImageClassification(snake_case__ )
model.to(snake_case__ )
model.eval()
lowerCAmelCase : List[str] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
lowerCAmelCase : Optional[Any] = model(snake_case__ , labels=snake_case__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : List[Any] = self.prepare_config_and_inputs()
        lowerCAmelCase , lowerCAmelCase , lowerCAmelCase : int = config_and_inputs
lowerCAmelCase : Optional[Any] = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class SCREAMING_SNAKE_CASE__ ( lowercase , lowercase , unittest.TestCase ):
"""simple docstring"""
a : Any =(
(
DeiTModel,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
)
if is_torch_available()
else ()
)
a : str =(
{
"feature-extraction": DeiTModel,
"image-classification": (DeiTForImageClassification, DeiTForImageClassificationWithTeacher),
}
if is_torch_available()
else {}
)
a : Tuple =False
a : Optional[Any] =False
a : Union[str, Any] =False
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : List[str] = DeiTModelTester(self )
lowerCAmelCase : int = ConfigTester(self , config_class=snake_case__ , has_text_modality=snake_case__ , hidden_size=37 )
def lowercase__ ( self ):
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason="DeiT does not use inputs_embeds" )
def lowercase__ ( self ):
"""simple docstring"""
pass
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase , lowerCAmelCase : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCAmelCase : int = model_class(snake_case__ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
lowerCAmelCase : List[Any] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(snake_case__ , nn.Linear ) )
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase , lowerCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCAmelCase : Dict = model_class(snake_case__ )
lowerCAmelCase : Union[str, Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCAmelCase : Dict = [*signature.parameters.keys()]
lowerCAmelCase : Any = ["pixel_values"]
self.assertListEqual(arg_names[:1] , snake_case__ )
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*snake_case__ )
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*snake_case__ )
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*snake_case__ )
def lowercase__ ( self , snake_case__ , snake_case__ , snake_case__=False ):
"""simple docstring"""
lowerCAmelCase : str = super()._prepare_for_class(snake_case__ , snake_case__ , return_labels=snake_case__ )
if return_labels:
if model_class.__name__ == "DeiTForImageClassificationWithTeacher":
del inputs_dict["labels"]
return inputs_dict
def lowercase__ ( self ):
"""simple docstring"""
if not self.model_tester.is_training:
return
lowerCAmelCase , lowerCAmelCase : Dict = self.model_tester.prepare_config_and_inputs_for_common()
lowerCAmelCase : Optional[Any] = True
for model_class in self.all_model_classes:
# DeiTForImageClassificationWithTeacher supports inference-only
if (
model_class in get_values(snake_case__ )
or model_class.__name__ == "DeiTForImageClassificationWithTeacher"
):
continue
lowerCAmelCase : Optional[int] = model_class(snake_case__ )
model.to(snake_case__ )
model.train()
lowerCAmelCase : Optional[Any] = self._prepare_for_class(snake_case__ , snake_case__ , return_labels=snake_case__ )
lowerCAmelCase : Optional[Any] = model(**snake_case__ ).loss
loss.backward()
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase , lowerCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
if not self.model_tester.is_training:
return
lowerCAmelCase : List[Any] = False
lowerCAmelCase : Optional[int] = True
for model_class in self.all_model_classes:
if model_class in get_values(snake_case__ ) or not model_class.supports_gradient_checkpointing:
continue
# DeiTForImageClassificationWithTeacher supports inference-only
if model_class.__name__ == "DeiTForImageClassificationWithTeacher":
continue
lowerCAmelCase : str = model_class(snake_case__ )
model.gradient_checkpointing_enable()
model.to(snake_case__ )
model.train()
lowerCAmelCase : List[str] = self._prepare_for_class(snake_case__ , snake_case__ , return_labels=snake_case__ )
lowerCAmelCase : int = model(**snake_case__ ).loss
loss.backward()
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase , lowerCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
lowerCAmelCase : List[str] = [
{"title": "multi_label_classification", "num_labels": 2, "dtype": torch.float},
{"title": "single_label_classification", "num_labels": 1, "dtype": torch.long},
{"title": "regression", "num_labels": 1, "dtype": torch.float},
]
for model_class in self.all_model_classes:
if (
model_class
not in [
*get_values(snake_case__ ),
*get_values(snake_case__ ),
]
or model_class.__name__ == "DeiTForImageClassificationWithTeacher"
):
continue
for problem_type in problem_types:
with self.subTest(msg=f"""Testing {model_class} with {problem_type["title"]}""" ):
lowerCAmelCase : List[Any] = problem_type["title"]
lowerCAmelCase : Dict = problem_type["num_labels"]
lowerCAmelCase : Optional[Any] = model_class(snake_case__ )
model.to(snake_case__ )
model.train()
lowerCAmelCase : Any = self._prepare_for_class(snake_case__ , snake_case__ , return_labels=snake_case__ )
if problem_type["num_labels"] > 1:
lowerCAmelCase : Optional[Any] = inputs["labels"].unsqueeze(1 ).repeat(1 , problem_type["num_labels"] )
lowerCAmelCase : Tuple = inputs["labels"].to(problem_type["dtype"] )
                    # This tests that we do not trigger the warning from PyTorch "Using a target size that is different
                    # to the input size. This will likely lead to incorrect results due to broadcasting. Please ensure
                    # they have the same size." which is a symptom that something is wrong for the regression problem.
                    # See https://github.com/huggingface/transformers/issues/11780
with warnings.catch_warnings(record=snake_case__ ) as warning_list:
lowerCAmelCase : Union[str, Any] = model(**snake_case__ ).loss
for w in warning_list:
if "Using a target size that is different to the input size" in str(w.message ):
raise ValueError(
f"""Something is going wrong in the regression problem: intercepted {w.message}""" )
loss.backward()
@slow
def lowercase__ ( self ):
"""simple docstring"""
for model_name in DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCAmelCase : Tuple = DeiTModel.from_pretrained(snake_case__ )
self.assertIsNotNone(snake_case__ )
def a__ ( ):
'''simple docstring'''
lowerCAmelCase : Union[str, Any] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
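# prepare_img loads the standard COCO "two cats" fixture shared by the vision
# integration tests; the reference logits asserted below correspond to the distilled
# DeiT base checkpoint loaded there.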
@require_torch
@require_vision
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def lowercase__ ( self ):
"""simple docstring"""
return (
DeiTImageProcessor.from_pretrained("facebook/deit-base-distilled-patch16-224" )
if is_vision_available()
else None
)
@slow
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : Tuple = DeiTForImageClassificationWithTeacher.from_pretrained("facebook/deit-base-distilled-patch16-224" ).to(
snake_case__ )
lowerCAmelCase : Tuple = self.default_image_processor
lowerCAmelCase : Tuple = prepare_img()
lowerCAmelCase : Optional[int] = image_processor(images=snake_case__ , return_tensors="pt" ).to(snake_case__ )
# forward pass
with torch.no_grad():
lowerCAmelCase : Dict = model(**snake_case__ )
# verify the logits
lowerCAmelCase : List[str] = torch.Size((1, 1_000) )
self.assertEqual(outputs.logits.shape , snake_case__ )
lowerCAmelCase : Optional[Any] = torch.tensor([-1.0266, 0.1912, -1.2861] ).to(snake_case__ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , snake_case__ , atol=1e-4 ) )
@slow
@require_accelerate
@require_torch_gpu
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : str = DeiTModel.from_pretrained(
"facebook/deit-base-distilled-patch16-224" , torch_dtype=torch.floataa , device_map="auto" )
lowerCAmelCase : Optional[int] = self.default_image_processor
lowerCAmelCase : Optional[int] = prepare_img()
lowerCAmelCase : str = image_processor(images=snake_case__ , return_tensors="pt" )
lowerCAmelCase : str = inputs.pixel_values.to(snake_case__ )
# forward pass to make sure inference works in fp16
with torch.no_grad():
lowerCAmelCase : Any = model(snake_case__ )
| 681 |
"""simple docstring"""
def a__ ( SCREAMING_SNAKE_CASE : List[str] ):
'''simple docstring'''
lowerCAmelCase : Optional[int] = len(SCREAMING_SNAKE_CASE )
while cur > 1:
# Find the maximum number in arr
lowerCAmelCase : List[str] = arr.index(max(arr[0:cur] ) )
# Reverse from 0 to mi
lowerCAmelCase : str = arr[mi::-1] + arr[mi + 1 : len(SCREAMING_SNAKE_CASE )]
# Reverse whole list
lowerCAmelCase : str = arr[cur - 1 :: -1] + arr[cur : len(SCREAMING_SNAKE_CASE )]
cur -= 1
return arr
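# Illustrative trace (added note, not from the original file): pancake_sort([3, 1, 2])
# proceeds [3, 1, 2] -> [2, 1, 3] -> [1, 2, 3]; each pass flips the current maximum
# to the front and then to position cur - 1, giving O(n^2) comparisons overall.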
if __name__ == "__main__":
lowerCAmelCase__ = input('''Enter numbers separated by a comma:\n''').strip()
lowerCAmelCase__ = [int(item) for item in user_input.split(''',''')]
print(pancake_sort(unsorted))
| 681 | 1 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {
'''studio-ousia/luke-base''': '''https://huggingface.co/studio-ousia/luke-base/resolve/main/config.json''',
'''studio-ousia/luke-large''': '''https://huggingface.co/studio-ousia/luke-large/resolve/main/config.json''',
}
class SCREAMING_SNAKE_CASE__ ( lowercase ):
"""simple docstring"""
a : Any ="luke"
def __init__( self , snake_case__=50_267 , snake_case__=500_000 , snake_case__=768 , snake_case__=256 , snake_case__=12 , snake_case__=12 , snake_case__=3_072 , snake_case__="gelu" , snake_case__=0.1 , snake_case__=0.1 , snake_case__=512 , snake_case__=2 , snake_case__=0.02 , snake_case__=1e-12 , snake_case__=True , snake_case__=None , snake_case__=1 , snake_case__=0 , snake_case__=2 , **snake_case__ , ):
"""simple docstring"""
super().__init__(pad_token_id=snake_case__ , bos_token_id=snake_case__ , eos_token_id=snake_case__ , **snake_case__ )
lowerCAmelCase : int = vocab_size
lowerCAmelCase : Optional[int] = entity_vocab_size
lowerCAmelCase : Optional[int] = hidden_size
lowerCAmelCase : Optional[Any] = entity_emb_size
lowerCAmelCase : Optional[int] = num_hidden_layers
lowerCAmelCase : Union[str, Any] = num_attention_heads
lowerCAmelCase : Optional[Any] = hidden_act
lowerCAmelCase : Union[str, Any] = intermediate_size
lowerCAmelCase : Optional[Any] = hidden_dropout_prob
lowerCAmelCase : Union[str, Any] = attention_probs_dropout_prob
lowerCAmelCase : Tuple = max_position_embeddings
lowerCAmelCase : Dict = type_vocab_size
lowerCAmelCase : Optional[Any] = initializer_range
lowerCAmelCase : Optional[int] = layer_norm_eps
lowerCAmelCase : Optional[int] = use_entity_aware_attention
lowerCAmelCase : List[Any] = classifier_dropout
| 681 |
"""simple docstring"""
from ..utils import is_flax_available, is_torch_available
if is_torch_available():
from .autoencoder_kl import AutoencoderKL
from .controlnet import ControlNetModel
from .dual_transformer_ad import DualTransformeraDModel
from .modeling_utils import ModelMixin
from .prior_transformer import PriorTransformer
from .ta_film_transformer import TaFilmDecoder
from .transformer_ad import TransformeraDModel
    from .unet_ad import UNetaDModel
    from .unet_ad_condition import UNetaDConditionModel
from .vq_model import VQModel
if is_flax_available():
from .controlnet_flax import FlaxControlNetModel
from .unet_ad_condition_flax import FlaxUNetaDConditionModel
from .vae_flax import FlaxAutoencoderKL
| 681 | 1 |
"""simple docstring"""
import logging
import math
import os
from dataclasses import dataclass, field
from glob import glob
from typing import Optional
from torch.utils.data import ConcatDataset
import transformers
from transformers import (
CONFIG_MAPPING,
MODEL_WITH_LM_HEAD_MAPPING,
AutoConfig,
AutoModelWithLMHead,
AutoTokenizer,
DataCollatorForLanguageModeling,
DataCollatorForPermutationLanguageModeling,
DataCollatorForWholeWordMask,
HfArgumentParser,
LineByLineTextDataset,
LineByLineWithRefDataset,
PreTrainedTokenizer,
TextDataset,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
lowerCAmelCase__ = logging.getLogger(__name__)
lowerCAmelCase__ = list(MODEL_WITH_LM_HEAD_MAPPING.keys())
lowerCAmelCase__ = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class SCREAMING_SNAKE_CASE__ :
"""simple docstring"""
a : Optional[str] =field(
default=lowercase , metadata={
"help": (
"The model checkpoint for weights initialization. Leave None if you want to train a model from"
" scratch."
)
} , )
a : Optional[str] =field(
default=lowercase , metadata={"help": "If training from scratch, pass a model type from the list: " + ", ".join(lowercase )} , )
a : Optional[str] =field(
default=lowercase , metadata={"help": "Pretrained config name or path if not the same as model_name"} )
a : Optional[str] =field(
default=lowercase , metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} )
a : Optional[str] =field(
default=lowercase , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} , )
@dataclass
class SCREAMING_SNAKE_CASE__ :
"""simple docstring"""
a : Optional[str] =field(
default=lowercase , metadata={"help": "The input training data file (a text file)."} )
a : Optional[str] =field(
default=lowercase , metadata={
"help": (
"The input training data files (multiple files in glob format). "
"Very often splitting large files to smaller files can prevent tokenizer going out of memory"
)
} , )
a : Optional[str] =field(
default=lowercase , metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."} , )
a : Optional[str] =field(
default=lowercase , metadata={"help": "An optional input train ref data file for whole word mask in Chinese."} , )
a : Optional[str] =field(
default=lowercase , metadata={"help": "An optional input eval ref data file for whole word mask in Chinese."} , )
a : bool =field(
default=lowercase , metadata={"help": "Whether distinct lines of text in the dataset are to be handled as distinct sequences."} , )
a : bool =field(
default=lowercase , metadata={"help": "Train with masked-language modeling loss instead of language modeling."} )
    a : bool =field(default=lowercase , metadata={"help": "Whether or not to use whole word masking."} )
a : float =field(
default=0.15 , metadata={"help": "Ratio of tokens to mask for masked language modeling loss"} )
a : float =field(
default=1 / 6 , metadata={
"help": (
"Ratio of length of a span of masked tokens to surrounding context length for permutation language"
" modeling."
)
} , )
a : int =field(
default=5 , metadata={"help": "Maximum length of a span of masked tokens for permutation language modeling."} )
a : int =field(
default=-1 , metadata={
"help": (
"Optional input sequence length after tokenization."
"The training dataset will be truncated in block of this size for training."
"Default to the model max input length for single sentence inputs (take into account special tokens)."
)
} , )
a : bool =field(
default=lowercase , metadata={"help": "Overwrite the cached training and evaluation sets"} )
def a__ ( SCREAMING_SNAKE_CASE : DataTrainingArguments , SCREAMING_SNAKE_CASE : PreTrainedTokenizer , SCREAMING_SNAKE_CASE : bool = False , SCREAMING_SNAKE_CASE : Optional[str] = None , ):
'''simple docstring'''
def _dataset(SCREAMING_SNAKE_CASE : List[str] , SCREAMING_SNAKE_CASE : List[str]=None ):
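        # line_by_line datasets treat every line of the file as an independent
        # example, while TextDataset concatenates the corpus and chunks it into
        # contiguous block_size pieces.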
if args.line_by_line:
if ref_path is not None:
if not args.whole_word_mask or not args.mlm:
raise ValueError("You need to set world whole masking and mlm to True for Chinese Whole Word Mask" )
return LineByLineWithRefDataset(
tokenizer=SCREAMING_SNAKE_CASE , file_path=SCREAMING_SNAKE_CASE , block_size=args.block_size , ref_path=SCREAMING_SNAKE_CASE , )
return LineByLineTextDataset(tokenizer=SCREAMING_SNAKE_CASE , file_path=SCREAMING_SNAKE_CASE , block_size=args.block_size )
else:
return TextDataset(
tokenizer=SCREAMING_SNAKE_CASE , file_path=SCREAMING_SNAKE_CASE , block_size=args.block_size , overwrite_cache=args.overwrite_cache , cache_dir=SCREAMING_SNAKE_CASE , )
if evaluate:
return _dataset(args.eval_data_file , args.eval_ref_file )
elif args.train_data_files:
return ConcatDataset([_dataset(SCREAMING_SNAKE_CASE ) for f in glob(args.train_data_files )] )
else:
return _dataset(args.train_data_file , args.train_ref_file )
def a__ ( ):
'''simple docstring'''
lowerCAmelCase : List[str] = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase : List[Any] = parser.parse_args_into_dataclasses()
if data_args.eval_data_file is None and training_args.do_eval:
raise ValueError(
"Cannot do evaluation without an evaluation data file. Either supply a file to --eval_data_file "
"or remove the --do_eval argument." )
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
f"""Output directory ({training_args.output_dir}) already exists and is not empty. Use"""
" --overwrite_output_dir to overcome." )
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
logger.warning(
"Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s" , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fpaa , )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info("Training/evaluation parameters %s" , SCREAMING_SNAKE_CASE )
# Set seed
set_seed(training_args.seed )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
if model_args.config_name:
lowerCAmelCase : int = AutoConfig.from_pretrained(model_args.config_name , cache_dir=model_args.cache_dir )
elif model_args.model_name_or_path:
lowerCAmelCase : Union[str, Any] = AutoConfig.from_pretrained(model_args.model_name_or_path , cache_dir=model_args.cache_dir )
else:
lowerCAmelCase : Tuple = CONFIG_MAPPING[model_args.model_type]()
logger.warning("You are instantiating a new config instance from scratch." )
if model_args.tokenizer_name:
lowerCAmelCase : Optional[Any] = AutoTokenizer.from_pretrained(model_args.tokenizer_name , cache_dir=model_args.cache_dir )
elif model_args.model_name_or_path:
lowerCAmelCase : List[str] = AutoTokenizer.from_pretrained(model_args.model_name_or_path , cache_dir=model_args.cache_dir )
else:
raise ValueError(
"You are instantiating a new tokenizer from scratch. This is not supported, but you can do it from another"
" script, save it,and load it from here, using --tokenizer_name" )
if model_args.model_name_or_path:
lowerCAmelCase : str = AutoModelWithLMHead.from_pretrained(
model_args.model_name_or_path , from_tf=bool(".ckpt" in model_args.model_name_or_path ) , config=SCREAMING_SNAKE_CASE , cache_dir=model_args.cache_dir , )
else:
logger.info("Training new model from scratch" )
lowerCAmelCase : Union[str, Any] = AutoModelWithLMHead.from_config(SCREAMING_SNAKE_CASE )
model.resize_token_embeddings(len(SCREAMING_SNAKE_CASE ) )
if config.model_type in ["bert", "roberta", "distilbert", "camembert"] and not data_args.mlm:
raise ValueError(
"BERT and RoBERTa-like models do not have LM heads but masked LM heads. They must be run using the"
"--mlm flag (masked language modeling)." )
if data_args.block_size <= 0:
lowerCAmelCase : List[Any] = tokenizer.max_len
# Our input block size will be the max possible for the model
else:
lowerCAmelCase : int = min(data_args.block_size , tokenizer.max_len )
# Get datasets
lowerCAmelCase : Optional[int] = (
get_dataset(SCREAMING_SNAKE_CASE , tokenizer=SCREAMING_SNAKE_CASE , cache_dir=model_args.cache_dir ) if training_args.do_train else None
)
lowerCAmelCase : Tuple = (
get_dataset(SCREAMING_SNAKE_CASE , tokenizer=SCREAMING_SNAKE_CASE , evaluate=SCREAMING_SNAKE_CASE , cache_dir=model_args.cache_dir )
if training_args.do_eval
else None
)
if config.model_type == "xlnet":
lowerCAmelCase : int = DataCollatorForPermutationLanguageModeling(
tokenizer=SCREAMING_SNAKE_CASE , plm_probability=data_args.plm_probability , max_span_length=data_args.max_span_length , )
else:
if data_args.mlm and data_args.whole_word_mask:
lowerCAmelCase : str = DataCollatorForWholeWordMask(
tokenizer=SCREAMING_SNAKE_CASE , mlm_probability=data_args.mlm_probability )
else:
lowerCAmelCase : Union[str, Any] = DataCollatorForLanguageModeling(
tokenizer=SCREAMING_SNAKE_CASE , mlm=data_args.mlm , mlm_probability=data_args.mlm_probability )
# Initialize our Trainer
lowerCAmelCase : Optional[Any] = Trainer(
model=SCREAMING_SNAKE_CASE , args=SCREAMING_SNAKE_CASE , data_collator=SCREAMING_SNAKE_CASE , train_dataset=SCREAMING_SNAKE_CASE , eval_dataset=SCREAMING_SNAKE_CASE , prediction_loss_only=SCREAMING_SNAKE_CASE , )
# Training
if training_args.do_train:
lowerCAmelCase : Any = (
model_args.model_name_or_path
if model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path )
else None
)
trainer.train(model_path=SCREAMING_SNAKE_CASE )
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_master():
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
lowerCAmelCase : List[str] = {}
if training_args.do_eval:
logger.info("*** Evaluate ***" )
lowerCAmelCase : Dict = trainer.evaluate()
lowerCAmelCase : Optional[int] = math.exp(eval_output["eval_loss"] )
lowerCAmelCase : Dict = {"perplexity": perplexity}
lowerCAmelCase : Optional[int] = os.path.join(training_args.output_dir , "eval_results_lm.txt" )
if trainer.is_world_master():
with open(SCREAMING_SNAKE_CASE , "w" ) as writer:
logger.info("***** Eval results *****" )
for key in sorted(result.keys() ):
logger.info(" %s = %s" , SCREAMING_SNAKE_CASE , str(result[key] ) )
writer.write("%s = %s\n" % (key, str(result[key] )) )
results.update(SCREAMING_SNAKE_CASE )
return results
def a__ ( SCREAMING_SNAKE_CASE : List[str] ):
'''simple docstring'''
main()
if __name__ == "__main__":
main()
| 681 |
"""simple docstring"""
import math
def a__ ( SCREAMING_SNAKE_CASE : int ):
'''simple docstring'''
    # Float-based check: exact for modest inputs, but can be fragile for very large ones.
    return math.sqrt(SCREAMING_SNAKE_CASE ) * math.sqrt(SCREAMING_SNAKE_CASE ) == SCREAMING_SNAKE_CASE
def a__ ( SCREAMING_SNAKE_CASE : int ):
'''simple docstring'''
lowerCAmelCase : Dict = 0
lowerCAmelCase : List[str] = n
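    # Binary search over candidate roots in [0, n]: any integer square root of
    # n must lie within [left, right], and the window halves each iteration.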
while left <= right:
lowerCAmelCase : str = (left + right) // 2
if mid**2 == n:
return True
elif mid**2 > n:
lowerCAmelCase : int = mid - 1
else:
lowerCAmelCase : int = mid + 1
return False
if __name__ == "__main__":
import doctest
doctest.testmod()
| 681 | 1 |
"""simple docstring"""
import absl # noqa: F401 # Here to have a nice missing dependency error message early on
import nltk # noqa: F401 # Here to have a nice missing dependency error message early on
import numpy # noqa: F401 # Here to have a nice missing dependency error message early on
import six # noqa: F401 # Here to have a nice missing dependency error message early on
from rouge_score import rouge_scorer, scoring
import datasets
lowerCAmelCase__ = '''\
@inproceedings{lin-2004-rouge,
title = "{ROUGE}: A Package for Automatic Evaluation of Summaries",
author = "Lin, Chin-Yew",
booktitle = "Text Summarization Branches Out",
month = jul,
year = "2004",
address = "Barcelona, Spain",
publisher = "Association for Computational Linguistics",
url = "https://www.aclweb.org/anthology/W04-1013",
pages = "74--81",
}
'''
lowerCAmelCase__ = '''\
ROUGE, or Recall-Oriented Understudy for Gisting Evaluation, is a set of metrics and a software package used for
evaluating automatic summarization and machine translation software in natural language processing.
The metrics compare an automatically produced summary or translation against one or more human-produced reference summaries or translations.
Note that ROUGE is case insensitive, meaning that upper case letters are treated the same way as lower case letters.
This metric is a wrapper around the Google Research reimplementation of ROUGE:
https://github.com/google-research/google-research/tree/master/rouge
'''
lowerCAmelCase__ = '''
Calculates average rouge scores for a list of hypotheses and references
Args:
predictions: list of predictions to score. Each prediction
should be a string with tokens separated by spaces.
references: list of reference for each prediction. Each
reference should be a string with tokens separated by spaces.
rouge_types: A list of rouge types to calculate.
Valid names:
`"rouge{n}"` (e.g. `"rouge1"`, `"rouge2"`) where: {n} is the n-gram based scoring,
`"rougeL"`: Longest common subsequence based scoring.
`"rougeLSum"`: rougeLsum splits text using `"\n"`.
See details in https://github.com/huggingface/datasets/issues/617
use_stemmer: Bool indicating whether Porter stemmer should be used to strip word suffixes.
use_aggregator: Return aggregates if this is set to True
Returns:
rouge1: rouge_1 (precision, recall, f1),
rouge2: rouge_2 (precision, recall, f1),
rougeL: rouge_l (precision, recall, f1),
rougeLsum: rouge_lsum (precision, recall, f1)
Examples:
>>> rouge = datasets.load_metric(\'rouge\')
>>> predictions = ["hello there", "general kenobi"]
>>> references = ["hello there", "general kenobi"]
>>> results = rouge.compute(predictions=predictions, references=references)
>>> print(list(results.keys()))
[\'rouge1\', \'rouge2\', \'rougeL\', \'rougeLsum\']
>>> print(results["rouge1"])
AggregateScore(low=Score(precision=1.0, recall=1.0, fmeasure=1.0), mid=Score(precision=1.0, recall=1.0, fmeasure=1.0), high=Score(precision=1.0, recall=1.0, fmeasure=1.0))
>>> print(results["rouge1"].mid.fmeasure)
1.0
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class SCREAMING_SNAKE_CASE__ ( datasets.Metric ):
"""simple docstring"""
def lowercase__ ( self ):
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Value("string" , id="sequence" ),
"references": datasets.Value("string" , id="sequence" ),
} ) , codebase_urls=["https://github.com/google-research/google-research/tree/master/rouge"] , reference_urls=[
"https://en.wikipedia.org/wiki/ROUGE_(metric)",
"https://github.com/google-research/google-research/tree/master/rouge",
] , )
def lowercase__ ( self , snake_case__ , snake_case__ , snake_case__=None , snake_case__=True , snake_case__=False ):
"""simple docstring"""
if rouge_types is None:
lowerCAmelCase : int = ["rouge1", "rouge2", "rougeL", "rougeLsum"]
lowerCAmelCase : Optional[Any] = rouge_scorer.RougeScorer(rouge_types=snake_case__ , use_stemmer=snake_case__ )
if use_aggregator:
lowerCAmelCase : Any = scoring.BootstrapAggregator()
else:
lowerCAmelCase : Optional[int] = []
for ref, pred in zip(snake_case__ , snake_case__ ):
lowerCAmelCase : List[Any] = scorer.score(snake_case__ , snake_case__ )
if use_aggregator:
aggregator.add_scores(snake_case__ )
else:
scores.append(snake_case__ )
if use_aggregator:
lowerCAmelCase : Optional[int] = aggregator.aggregate()
else:
lowerCAmelCase : Dict = {}
for key in scores[0]:
lowerCAmelCase : Union[str, Any] = [score[key] for score in scores]
return result
| 681 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {
'''google/vit-base-patch16-224''': '''https://huggingface.co/vit-base-patch16-224/resolve/main/config.json''',
# See all ViT models at https://huggingface.co/models?filter=vit
}
class SCREAMING_SNAKE_CASE__ ( lowercase ):
"""simple docstring"""
a : Union[str, Any] ="vit"
def __init__( self , snake_case__=768 , snake_case__=12 , snake_case__=12 , snake_case__=3_072 , snake_case__="gelu" , snake_case__=0.0 , snake_case__=0.0 , snake_case__=0.02 , snake_case__=1e-12 , snake_case__=224 , snake_case__=16 , snake_case__=3 , snake_case__=True , snake_case__=16 , **snake_case__ , ):
"""simple docstring"""
super().__init__(**snake_case__ )
lowerCAmelCase : Optional[Any] = hidden_size
lowerCAmelCase : List[Any] = num_hidden_layers
lowerCAmelCase : List[Any] = num_attention_heads
lowerCAmelCase : Union[str, Any] = intermediate_size
lowerCAmelCase : Any = hidden_act
lowerCAmelCase : Optional[Any] = hidden_dropout_prob
lowerCAmelCase : Optional[Any] = attention_probs_dropout_prob
lowerCAmelCase : List[str] = initializer_range
lowerCAmelCase : Optional[Any] = layer_norm_eps
lowerCAmelCase : Optional[int] = image_size
lowerCAmelCase : Optional[Any] = patch_size
lowerCAmelCase : Tuple = num_channels
lowerCAmelCase : Optional[int] = qkv_bias
lowerCAmelCase : str = encoder_stride
class SCREAMING_SNAKE_CASE__ ( lowercase ):
"""simple docstring"""
a : List[Any] =version.parse("1.11" )
@property
def lowercase__ ( self ):
"""simple docstring"""
return OrderedDict(
[
("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
] )
@property
def lowercase__ ( self ):
"""simple docstring"""
return 1e-4
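# Note: the ONNX export config above declares fully dynamic pixel_values axes
# (batch, num_channels, height, width) and uses an absolute tolerance of 1e-4
# when validating exported outputs against the reference model.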
| 681 | 1 |
"""simple docstring"""
def a__ ( SCREAMING_SNAKE_CASE : Optional[int] , SCREAMING_SNAKE_CASE : Optional[int] ):
'''simple docstring'''
lowerCAmelCase : Optional[Any] = 0
lowerCAmelCase : Optional[Any] = len(SCREAMING_SNAKE_CASE ) - 1
while left <= right:
        # avoid division by zero during interpolation
if sorted_collection[left] == sorted_collection[right]:
if sorted_collection[left] == item:
return left
else:
return None
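        # Interpolation probe: assuming values are roughly uniformly distributed
        # between sorted_collection[left] and sorted_collection[right], estimate
        # where `item` should sit; integer division keeps the index integral.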
lowerCAmelCase : Optional[Any] = left + ((item - sorted_collection[left]) * (right - left)) // (
sorted_collection[right] - sorted_collection[left]
)
# out of range check
if point < 0 or point >= len(SCREAMING_SNAKE_CASE ):
return None
lowerCAmelCase : Optional[int] = sorted_collection[point]
if current_item == item:
return point
else:
if point < left:
lowerCAmelCase : int = left
lowerCAmelCase : Tuple = point
elif point > right:
lowerCAmelCase : str = right
lowerCAmelCase : Dict = point
else:
if item < current_item:
lowerCAmelCase : str = point - 1
else:
lowerCAmelCase : str = point + 1
return None
def a__ ( SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : Tuple , SCREAMING_SNAKE_CASE : Optional[int] , SCREAMING_SNAKE_CASE : Dict ):
'''simple docstring'''
if sorted_collection[left] == sorted_collection[right]:
if sorted_collection[left] == item:
return left
else:
return None
lowerCAmelCase : Union[str, Any] = left + ((item - sorted_collection[left]) * (right - left)) // (
sorted_collection[right] - sorted_collection[left]
)
# out of range check
if point < 0 or point >= len(SCREAMING_SNAKE_CASE ):
return None
if sorted_collection[point] == item:
return point
elif point < left:
return interpolation_search_by_recursion(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
elif point > right:
return interpolation_search_by_recursion(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
else:
if sorted_collection[point] > item:
return interpolation_search_by_recursion(
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , point - 1 )
else:
return interpolation_search_by_recursion(
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , point + 1 , SCREAMING_SNAKE_CASE )
def a__ ( SCREAMING_SNAKE_CASE : Optional[int] ):
'''simple docstring'''
if collection != sorted(SCREAMING_SNAKE_CASE ):
raise ValueError("Collection must be ascending sorted" )
return True
if __name__ == "__main__":
import sys
lowerCAmelCase__ = 0
if debug == 1:
lowerCAmelCase__ = [10, 30, 40, 45, 50, 66, 77, 93]
try:
__assert_sorted(collection)
except ValueError:
sys.exit('''Sequence must be ascending sorted to apply interpolation search''')
lowerCAmelCase__ = 67
lowerCAmelCase__ = interpolation_search(collection, target)
if result is not None:
print(F"{target} found at positions: {result}")
else:
print('''Not found''')
| 681 |
"""simple docstring"""
from .glue import glue_convert_examples_to_features, glue_output_modes, glue_processors, glue_tasks_num_labels
from .squad import SquadExample, SquadFeatures, SquadVaProcessor, squad_convert_examples_to_features
from .utils import DataProcessor, InputExample, InputFeatures, SingleSentenceClassificationProcessor
from .xnli import xnli_output_modes, xnli_processors, xnli_tasks_num_labels
| 681 | 1 |
"""simple docstring"""
from math import ceil
def a__ ( SCREAMING_SNAKE_CASE : int = 1_0_0_1 ):
'''simple docstring'''
lowerCAmelCase : int = 1
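    # The four corners of the i-th ring of the number spiral are (2i+1)^2,
    # (2i+1)^2 - 2i, (2i+1)^2 - 4i and (2i+1)^2 - 6i, which sum to
    # 4 * (2i+1)^2 - 12i, i.e. 4 * odd**2 - 6 * even with even = 2i.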
for i in range(1 , int(ceil(n / 2.0 ) ) ):
lowerCAmelCase : Optional[int] = 2 * i + 1
lowerCAmelCase : Optional[int] = 2 * i
lowerCAmelCase : str = total + 4 * odd**2 - 6 * even
return total
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
print(solution())
else:
try:
lowerCAmelCase__ = int(sys.argv[1])
print(solution(n))
except ValueError:
print('''Invalid entry - please enter a number''')
| 681 |
"""simple docstring"""
import copy
import inspect
import unittest
from transformers import AutoBackbone
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import require_timm, require_torch, torch_device
from transformers.utils.import_utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
if is_torch_available():
import torch
from transformers import TimmBackbone, TimmBackboneConfig
from ...test_pipeline_mixin import PipelineTesterMixin
class SCREAMING_SNAKE_CASE__ :
"""simple docstring"""
def __init__( self , snake_case__ , snake_case__=None , snake_case__=None , snake_case__=None , snake_case__="resnet50" , snake_case__=3 , snake_case__=32 , snake_case__=3 , snake_case__=True , snake_case__=True , ):
"""simple docstring"""
lowerCAmelCase : List[str] = parent
lowerCAmelCase : Union[str, Any] = out_indices if out_indices is not None else [4]
lowerCAmelCase : Tuple = stage_names
lowerCAmelCase : Any = out_features
lowerCAmelCase : Any = backbone
lowerCAmelCase : Union[str, Any] = batch_size
lowerCAmelCase : Optional[int] = image_size
lowerCAmelCase : List[str] = num_channels
lowerCAmelCase : int = use_pretrained_backbone
lowerCAmelCase : Tuple = is_training
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : Dict = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowerCAmelCase : Optional[int] = self.get_config()
return config, pixel_values
def lowercase__ ( self ):
"""simple docstring"""
return TimmBackboneConfig(
image_size=self.image_size , num_channels=self.num_channels , out_features=self.out_features , out_indices=self.out_indices , stage_names=self.stage_names , use_pretrained_backbone=self.use_pretrained_backbone , backbone=self.backbone , )
def lowercase__ ( self , snake_case__ , snake_case__ ):
"""simple docstring"""
lowerCAmelCase : List[Any] = TimmBackbone(config=snake_case__ )
model.to(snake_case__ )
model.eval()
with torch.no_grad():
lowerCAmelCase : Union[str, Any] = model(snake_case__ )
self.parent.assertEqual(
            result.feature_maps[-1].shape , (self.batch_size, model.channels[-1], 14, 14) , )
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : Union[str, Any] = self.prepare_config_and_inputs()
lowerCAmelCase , lowerCAmelCase : Tuple = config_and_inputs
lowerCAmelCase : str = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
@require_timm
class SCREAMING_SNAKE_CASE__ ( lowercase , lowercase , lowercase , unittest.TestCase ):
"""simple docstring"""
a : Optional[int] =(TimmBackbone,) if is_torch_available() else ()
a : Union[str, Any] ={"feature-extraction": TimmBackbone} if is_torch_available() else {}
a : Tuple =False
a : List[Any] =False
a : Optional[Any] =False
a : Dict =False
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : Optional[Any] = TimmBackboneModelTester(self )
lowerCAmelCase : List[str] = ConfigTester(self , config_class=snake_case__ , has_text_modality=snake_case__ )
def lowercase__ ( self ):
"""simple docstring"""
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : str = "resnet18"
lowerCAmelCase : str = "microsoft/resnet-18"
lowerCAmelCase : List[Any] = AutoBackbone.from_pretrained(snake_case__ , use_timm_backbone=snake_case__ )
lowerCAmelCase : List[str] = AutoBackbone.from_pretrained(snake_case__ )
self.assertEqual(len(timm_model.out_features ) , len(transformers_model.out_features ) )
self.assertEqual(len(timm_model.stage_names ) , len(transformers_model.stage_names ) )
self.assertEqual(timm_model.channels , transformers_model.channels )
# Out indices are set to the last layer by default. For timm models, we don't know
# the number of layers in advance, so we set it to (-1,), whereas for transformers
# models, we set it to [len(stage_names) - 1] (kept for backward compatibility).
self.assertEqual(timm_model.out_indices , (-1,) )
self.assertEqual(transformers_model.out_indices , [len(timm_model.stage_names ) - 1] )
lowerCAmelCase : Union[str, Any] = AutoBackbone.from_pretrained(snake_case__ , use_timm_backbone=snake_case__ , out_indices=[1, 2, 3] )
lowerCAmelCase : List[Any] = AutoBackbone.from_pretrained(snake_case__ , out_indices=[1, 2, 3] )
self.assertEqual(timm_model.out_indices , transformers_model.out_indices )
self.assertEqual(len(timm_model.out_features ) , len(transformers_model.out_features ) )
self.assertEqual(timm_model.channels , transformers_model.channels )
@unittest.skip("TimmBackbone doesn't support feed forward chunking" )
def lowercase__ ( self ):
"""simple docstring"""
pass
@unittest.skip("TimmBackbone doesn't have num_hidden_layers attribute" )
def lowercase__ ( self ):
"""simple docstring"""
pass
@unittest.skip("TimmBackbone initialization is managed on the timm side" )
def lowercase__ ( self ):
"""simple docstring"""
pass
@unittest.skip("TimmBackbone models doesn't have inputs_embeds" )
def lowercase__ ( self ):
"""simple docstring"""
pass
@unittest.skip("TimmBackbone models doesn't have inputs_embeds" )
def lowercase__ ( self ):
"""simple docstring"""
pass
@unittest.skip("TimmBackbone model cannot be created without specifying a backbone checkpoint" )
def lowercase__ ( self ):
"""simple docstring"""
pass
@unittest.skip("Only checkpoints on timm can be loaded into TimmBackbone" )
def lowercase__ ( self ):
"""simple docstring"""
pass
@unittest.skip("model weights aren't tied in TimmBackbone." )
def lowercase__ ( self ):
"""simple docstring"""
pass
@unittest.skip("model weights aren't tied in TimmBackbone." )
def lowercase__ ( self ):
"""simple docstring"""
pass
@unittest.skip("Only checkpoints on timm can be loaded into TimmBackbone" )
def lowercase__ ( self ):
"""simple docstring"""
pass
@unittest.skip("Only checkpoints on timm can be loaded into TimmBackbone" )
def lowercase__ ( self ):
"""simple docstring"""
pass
@unittest.skip("TimmBackbone doesn't have hidden size info in its configuration." )
def lowercase__ ( self ):
"""simple docstring"""
pass
@unittest.skip("TimmBackbone doesn't support output_attentions." )
def lowercase__ ( self ):
"""simple docstring"""
pass
@unittest.skip("Safetensors is not supported by timm." )
def lowercase__ ( self ):
"""simple docstring"""
pass
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
def lowercase__ ( self ):
"""simple docstring"""
pass
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase , lowerCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCAmelCase : Dict = model_class(snake_case__ )
lowerCAmelCase : Any = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCAmelCase : Optional[int] = [*signature.parameters.keys()]
lowerCAmelCase : Any = ["pixel_values"]
self.assertListEqual(arg_names[:1] , snake_case__ )
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase , lowerCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
lowerCAmelCase : int = True
lowerCAmelCase : str = self.has_attentions
# no need to test all models as different heads yield the same functionality
lowerCAmelCase : Optional[int] = self.all_model_classes[0]
lowerCAmelCase : Union[str, Any] = model_class(snake_case__ )
model.to(snake_case__ )
lowerCAmelCase : Optional[int] = self._prepare_for_class(snake_case__ , snake_case__ )
lowerCAmelCase : Dict = model(**snake_case__ )
lowerCAmelCase : Tuple = outputs[0][-1]
# Encoder-/Decoder-only models
lowerCAmelCase : Optional[int] = outputs.hidden_states[0]
hidden_states.retain_grad()
if self.has_attentions:
lowerCAmelCase : Union[str, Any] = outputs.attentions[0]
attentions.retain_grad()
output.flatten()[0].backward(retain_graph=snake_case__ )
self.assertIsNotNone(hidden_states.grad )
if self.has_attentions:
self.assertIsNotNone(attentions.grad )
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase , lowerCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCAmelCase : Dict = model_class(snake_case__ )
model.to(snake_case__ )
model.eval()
lowerCAmelCase : List[str] = model(**snake_case__ )
self.assertEqual(len(result.feature_maps ) , len(config.out_indices ) )
self.assertEqual(len(model.channels ) , len(config.out_indices ) )
# Check output of last stage is taken if out_features=None, out_indices=None
lowerCAmelCase : Dict = copy.deepcopy(snake_case__ )
lowerCAmelCase : Optional[int] = None
lowerCAmelCase : Dict = model_class(snake_case__ )
model.to(snake_case__ )
model.eval()
lowerCAmelCase : Optional[int] = model(**snake_case__ )
self.assertEqual(len(result.feature_maps ) , 1 )
self.assertEqual(len(model.channels ) , 1 )
# Check backbone can be initialized with fresh weights
lowerCAmelCase : Optional[int] = copy.deepcopy(snake_case__ )
lowerCAmelCase : List[str] = False
lowerCAmelCase : int = model_class(snake_case__ )
model.to(snake_case__ )
model.eval()
lowerCAmelCase : Optional[Any] = model(**snake_case__ )
| 681 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
lowerCAmelCase__ = {
'''configuration_efficientformer''': [
'''EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''EfficientFormerConfig''',
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = ['''EfficientFormerImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = [
'''EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''EfficientFormerForImageClassification''',
'''EfficientFormerForImageClassificationWithTeacher''',
'''EfficientFormerModel''',
'''EfficientFormerPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = [
'''TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFEfficientFormerForImageClassification''',
'''TFEfficientFormerForImageClassificationWithTeacher''',
'''TFEfficientFormerModel''',
'''TFEfficientFormerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_efficientformer import EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, EfficientFormerConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_efficientformer import EfficientFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_efficientformer import (
EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
EfficientFormerForImageClassification,
EfficientFormerForImageClassificationWithTeacher,
EfficientFormerModel,
EfficientFormerPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_efficientformer import (
TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerModel,
TFEfficientFormerPreTrainedModel,
)
else:
import sys
lowerCAmelCase__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 681 |
"""simple docstring"""
import argparse
import os
import re
lowerCAmelCase__ = '''src/transformers'''
# Pattern that looks at the indentation in a line.
lowerCAmelCase__ = re.compile(r'''^(\s*)\S''')
# Pattern that matches `"key":` and puts `key` in group 0.
lowerCAmelCase__ = re.compile(r'''^\s*"([^"]+)":''')
# Pattern that matches `_import_structure["key"]` and puts `key` in group 0.
lowerCAmelCase__ = re.compile(r'''^\s*_import_structure\["([^"]+)"\]''')
# Pattern that matches `"key",` and puts `key` in group 0.
lowerCAmelCase__ = re.compile(r'''^\s*"([^"]+)",\s*$''')
# Pattern that matches any `[stuff]` and puts `stuff` in group 0.
lowerCAmelCase__ = re.compile(r'''\[([^\]]+)\]''')
def a__ ( SCREAMING_SNAKE_CASE : List[str] ):
'''simple docstring'''
lowerCAmelCase : Union[str, Any] = _re_indent.search(SCREAMING_SNAKE_CASE )
return "" if search is None else search.groups()[0]
def a__ ( SCREAMING_SNAKE_CASE : List[Any] , SCREAMING_SNAKE_CASE : Optional[int]="" , SCREAMING_SNAKE_CASE : Optional[int]=None , SCREAMING_SNAKE_CASE : Dict=None ):
'''simple docstring'''
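    # Splits `code` into blocks at the given indentation depth, keeping
    # everything before `start_prompt` (and after `end_prompt`, if provided)
    # as single untouched chunks so only the section in between is reordered.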
lowerCAmelCase : Tuple = 0
lowerCAmelCase : Optional[int] = code.split("\n" )
if start_prompt is not None:
while not lines[index].startswith(SCREAMING_SNAKE_CASE ):
index += 1
lowerCAmelCase : Dict = ["\n".join(lines[:index] )]
else:
lowerCAmelCase : Any = []
# We split into blocks until we get to the `end_prompt` (or the end of the block).
lowerCAmelCase : Union[str, Any] = [lines[index]]
index += 1
while index < len(SCREAMING_SNAKE_CASE ) and (end_prompt is None or not lines[index].startswith(SCREAMING_SNAKE_CASE )):
if len(lines[index] ) > 0 and get_indent(lines[index] ) == indent_level:
if len(SCREAMING_SNAKE_CASE ) > 0 and get_indent(current_block[-1] ).startswith(indent_level + " " ):
current_block.append(lines[index] )
blocks.append("\n".join(SCREAMING_SNAKE_CASE ) )
if index < len(SCREAMING_SNAKE_CASE ) - 1:
lowerCAmelCase : List[str] = [lines[index + 1]]
index += 1
else:
lowerCAmelCase : Optional[Any] = []
else:
blocks.append("\n".join(SCREAMING_SNAKE_CASE ) )
lowerCAmelCase : str = [lines[index]]
else:
current_block.append(lines[index] )
index += 1
# Adds current block if it's nonempty.
if len(SCREAMING_SNAKE_CASE ) > 0:
blocks.append("\n".join(SCREAMING_SNAKE_CASE ) )
# Add final block after end_prompt if provided.
if end_prompt is not None and index < len(SCREAMING_SNAKE_CASE ):
blocks.append("\n".join(lines[index:] ) )
return blocks
def a__ ( SCREAMING_SNAKE_CASE : int ):
'''simple docstring'''
def _inner(SCREAMING_SNAKE_CASE : Optional[Any] ):
return key(SCREAMING_SNAKE_CASE ).lower().replace("_" , "" )
return _inner
def a__ ( SCREAMING_SNAKE_CASE : Optional[int] , SCREAMING_SNAKE_CASE : Dict=None ):
'''simple docstring'''
def noop(SCREAMING_SNAKE_CASE : List[Any] ):
return x
if key is None:
lowerCAmelCase : int = noop
# Constants are all uppercase, they go first.
lowerCAmelCase : Dict = [obj for obj in objects if key(SCREAMING_SNAKE_CASE ).isupper()]
# Classes are not all uppercase but start with a capital, they go second.
lowerCAmelCase : List[Any] = [obj for obj in objects if key(SCREAMING_SNAKE_CASE )[0].isupper() and not key(SCREAMING_SNAKE_CASE ).isupper()]
# Functions begin with a lowercase, they go last.
lowerCAmelCase : List[Any] = [obj for obj in objects if not key(SCREAMING_SNAKE_CASE )[0].isupper()]
lowerCAmelCase : Dict = ignore_underscore(SCREAMING_SNAKE_CASE )
return sorted(SCREAMING_SNAKE_CASE , key=SCREAMING_SNAKE_CASE ) + sorted(SCREAMING_SNAKE_CASE , key=SCREAMING_SNAKE_CASE ) + sorted(SCREAMING_SNAKE_CASE , key=SCREAMING_SNAKE_CASE )
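# Illustrative example (added note): sort_objects(["foo", "Bar", "BAZ"]) returns
# ["BAZ", "Bar", "foo"] -- constants first, then classes, then functions, each
# group sorted case-insensitively with underscores ignored.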
def a__ ( SCREAMING_SNAKE_CASE : List[str] ):
'''simple docstring'''
def _replace(SCREAMING_SNAKE_CASE : List[Any] ):
lowerCAmelCase : List[str] = match.groups()[0]
if "," not in imports:
return f"""[{imports}]"""
lowerCAmelCase : Dict = [part.strip().replace("\"" , "" ) for part in imports.split("," )]
# We will have a final empty element if the line finished with a comma.
if len(keys[-1] ) == 0:
lowerCAmelCase : Any = keys[:-1]
return "[" + ", ".join([f"""\"{k}\"""" for k in sort_objects(SCREAMING_SNAKE_CASE )] ) + "]"
lowerCAmelCase : List[Any] = import_statement.split("\n" )
if len(SCREAMING_SNAKE_CASE ) > 3:
# Here we have to sort internal imports that are on several lines (one per name):
# key: [
# "object1",
# "object2",
# ...
# ]
# We may have to ignore one or two lines on each side.
lowerCAmelCase : Tuple = 2 if lines[1].strip() == "[" else 1
lowerCAmelCase : Optional[Any] = [(i, _re_strip_line.search(SCREAMING_SNAKE_CASE ).groups()[0]) for i, line in enumerate(lines[idx:-idx] )]
lowerCAmelCase : Optional[Any] = sort_objects(SCREAMING_SNAKE_CASE , key=lambda SCREAMING_SNAKE_CASE : x[1] )
lowerCAmelCase : List[str] = [lines[x[0] + idx] for x in sorted_indices]
return "\n".join(lines[:idx] + sorted_lines + lines[-idx:] )
elif len(SCREAMING_SNAKE_CASE ) == 3:
# Here we have to sort internal imports that are on one separate line:
# key: [
# "object1", "object2", ...
# ]
if _re_bracket_content.search(lines[1] ) is not None:
lowerCAmelCase : Optional[int] = _re_bracket_content.sub(_replace , lines[1] )
else:
lowerCAmelCase : List[str] = [part.strip().replace("\"" , "" ) for part in lines[1].split("," )]
# We will have a final empty element if the line finished with a comma.
if len(keys[-1] ) == 0:
lowerCAmelCase : Union[str, Any] = keys[:-1]
lowerCAmelCase : str = get_indent(lines[1] ) + ", ".join([f"""\"{k}\"""" for k in sort_objects(SCREAMING_SNAKE_CASE )] )
return "\n".join(SCREAMING_SNAKE_CASE )
else:
# Finally we have to deal with imports fitting on one line
lowerCAmelCase : Any = _re_bracket_content.sub(_replace , SCREAMING_SNAKE_CASE )
return import_statement
def a__ ( SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : Tuple=True ):
'''simple docstring'''
with open(SCREAMING_SNAKE_CASE , encoding="utf-8" ) as f:
lowerCAmelCase : Union[str, Any] = f.read()
if "_import_structure" not in code:
return
# Blocks of indent level 0
lowerCAmelCase : List[str] = split_code_in_indented_blocks(
SCREAMING_SNAKE_CASE , start_prompt="_import_structure = {" , end_prompt="if TYPE_CHECKING:" )
    # We ignore block 0 (everything until start_prompt) and the last block (everything after end_prompt).
for block_idx in range(1 , len(SCREAMING_SNAKE_CASE ) - 1 ):
# Check if the block contains some `_import_structure`s thingy to sort.
lowerCAmelCase : Tuple = main_blocks[block_idx]
lowerCAmelCase : Optional[Any] = block.split("\n" )
# Get to the start of the imports.
lowerCAmelCase : int = 0
while line_idx < len(SCREAMING_SNAKE_CASE ) and "_import_structure" not in block_lines[line_idx]:
# Skip dummy import blocks
if "import dummy" in block_lines[line_idx]:
lowerCAmelCase : Optional[Any] = len(SCREAMING_SNAKE_CASE )
else:
line_idx += 1
if line_idx >= len(SCREAMING_SNAKE_CASE ):
continue
# Ignore beginning and last line: they don't contain anything.
lowerCAmelCase : Optional[Any] = "\n".join(block_lines[line_idx:-1] )
lowerCAmelCase : Dict = get_indent(block_lines[1] )
        # Split the internal block into blocks of indent level 1.
lowerCAmelCase : Optional[Any] = split_code_in_indented_blocks(SCREAMING_SNAKE_CASE , indent_level=SCREAMING_SNAKE_CASE )
# We have two categories of import key: list or _import_structure[key].append/extend
lowerCAmelCase : Tuple = _re_direct_key if "_import_structure = {" in block_lines[0] else _re_indirect_key
# Grab the keys, but there is a trap: some lines are empty or just comments.
lowerCAmelCase : Tuple = [(pattern.search(SCREAMING_SNAKE_CASE ).groups()[0] if pattern.search(SCREAMING_SNAKE_CASE ) is not None else None) for b in internal_blocks]
# We only sort the lines with a key.
lowerCAmelCase : int = [(i, key) for i, key in enumerate(SCREAMING_SNAKE_CASE ) if key is not None]
lowerCAmelCase : Union[str, Any] = [x[0] for x in sorted(SCREAMING_SNAKE_CASE , key=lambda SCREAMING_SNAKE_CASE : x[1] )]
# We reorder the blocks by leaving empty lines/comments as they were and reorder the rest.
lowerCAmelCase : Tuple = 0
lowerCAmelCase : Any = []
for i in range(len(SCREAMING_SNAKE_CASE ) ):
if keys[i] is None:
reorderded_blocks.append(internal_blocks[i] )
else:
lowerCAmelCase : Dict = sort_objects_in_import(internal_blocks[sorted_indices[count]] )
reorderded_blocks.append(SCREAMING_SNAKE_CASE )
count += 1
# And we put our main block back together with its first and last line.
lowerCAmelCase : List[Any] = "\n".join(block_lines[:line_idx] + reorderded_blocks + [block_lines[-1]] )
if code != "\n".join(SCREAMING_SNAKE_CASE ):
if check_only:
return True
else:
print(f"""Overwriting {file}.""" )
with open(SCREAMING_SNAKE_CASE , "w" , encoding="utf-8" ) as f:
f.write("\n".join(SCREAMING_SNAKE_CASE ) )
def a__ ( SCREAMING_SNAKE_CASE : List[str]=True ):
'''simple docstring'''
lowerCAmelCase : Optional[int] = []
for root, _, files in os.walk(SCREAMING_SNAKE_CASE ):
if "__init__.py" in files:
lowerCAmelCase : Tuple = sort_imports(os.path.join(SCREAMING_SNAKE_CASE , "__init__.py" ) , check_only=SCREAMING_SNAKE_CASE )
if result:
lowerCAmelCase : Optional[Any] = [os.path.join(SCREAMING_SNAKE_CASE , "__init__.py" )]
if len(SCREAMING_SNAKE_CASE ) > 0:
raise ValueError(f"""Would overwrite {len(SCREAMING_SNAKE_CASE )} files, run `make style`.""" )
if __name__ == "__main__":
lowerCAmelCase__ = argparse.ArgumentParser()
parser.add_argument('''--check_only''', action='''store_true''', help='''Whether to only check or fix style.''')
lowerCAmelCase__ = parser.parse_args()
sort_imports_in_all_inits(check_only=args.check_only)
| 681 | 1 |
"""simple docstring"""
import argparse
import json
import os
import pickle
import shutil
import numpy as np
import torch
from distiller import Distiller
from lm_seqs_dataset import LmSeqsDataset
from transformers import (
BertConfig,
BertForMaskedLM,
BertTokenizer,
DistilBertConfig,
DistilBertForMaskedLM,
DistilBertTokenizer,
GPTaConfig,
GPTaLMHeadModel,
GPTaTokenizer,
RobertaConfig,
RobertaForMaskedLM,
RobertaTokenizer,
)
from utils import git_log, init_gpu_params, logger, set_seed
lowerCAmelCase__ = {
'''distilbert''': (DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer),
'''roberta''': (RobertaConfig, RobertaForMaskedLM, RobertaTokenizer),
'''bert''': (BertConfig, BertForMaskedLM, BertTokenizer),
'''gpt2''': (GPTaConfig, GPTaLMHeadModel, GPTaTokenizer),
}
def a__ ( SCREAMING_SNAKE_CASE : List[str] ):
'''simple docstring'''
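    # Sanity checks (described here for readability): exactly one of the MLM /
    # CLM objectives may carry positive weight, the student/teacher families
    # must be compatible, and at least one loss term must be active overall.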
assert (args.mlm and args.alpha_mlm > 0.0) or (not args.mlm and args.alpha_mlm == 0.0)
assert (args.alpha_mlm > 0.0 and args.alpha_clm == 0.0) or (args.alpha_mlm == 0.0 and args.alpha_clm > 0.0)
if args.mlm:
assert os.path.isfile(args.token_counts )
assert (args.student_type in ["roberta", "distilbert"]) and (args.teacher_type in ["roberta", "bert"])
else:
assert (args.student_type in ["gpt2"]) and (args.teacher_type in ["gpt2"])
assert args.teacher_type == args.student_type or (
args.student_type == "distilbert" and args.teacher_type == "bert"
)
assert os.path.isfile(args.student_config )
if args.student_pretrained_weights is not None:
assert os.path.isfile(args.student_pretrained_weights )
if args.freeze_token_type_embds:
assert args.student_type in ["roberta"]
assert args.alpha_ce >= 0.0
assert args.alpha_mlm >= 0.0
assert args.alpha_clm >= 0.0
assert args.alpha_mse >= 0.0
assert args.alpha_cos >= 0.0
assert args.alpha_ce + args.alpha_mlm + args.alpha_clm + args.alpha_mse + args.alpha_cos > 0.0
def a__ ( SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : Any ):
'''simple docstring'''
if args.student_type == "roberta":
lowerCAmelCase : Optional[int] = False
elif args.student_type == "gpt2":
lowerCAmelCase : Union[str, Any] = False
def a__ ( SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : List[Any] ):
'''simple docstring'''
if args.student_type == "roberta":
lowerCAmelCase : Any = False
def a__ ( ):
'''simple docstring'''
lowerCAmelCase : Optional[int] = argparse.ArgumentParser(description="Training" )
parser.add_argument("--force" , action="store_true" , help="Overwrite dump_path if it already exists." )
parser.add_argument(
"--dump_path" , type=SCREAMING_SNAKE_CASE , required=SCREAMING_SNAKE_CASE , help="The output directory (log, checkpoints, parameters, etc.)" )
parser.add_argument(
"--data_file" , type=SCREAMING_SNAKE_CASE , required=SCREAMING_SNAKE_CASE , help="The binarized file (tokenized + tokens_to_ids) and grouped by sequence." , )
parser.add_argument(
"--student_type" , type=SCREAMING_SNAKE_CASE , choices=["distilbert", "roberta", "gpt2"] , required=SCREAMING_SNAKE_CASE , help="The student type (DistilBERT, RoBERTa)." , )
parser.add_argument("--student_config" , type=SCREAMING_SNAKE_CASE , required=SCREAMING_SNAKE_CASE , help="Path to the student configuration." )
parser.add_argument(
"--student_pretrained_weights" , default=SCREAMING_SNAKE_CASE , type=SCREAMING_SNAKE_CASE , help="Load student initialization checkpoint." )
parser.add_argument(
"--teacher_type" , choices=["bert", "roberta", "gpt2"] , required=SCREAMING_SNAKE_CASE , help="Teacher type (BERT, RoBERTa)." )
parser.add_argument("--teacher_name" , type=SCREAMING_SNAKE_CASE , required=SCREAMING_SNAKE_CASE , help="The teacher model." )
parser.add_argument("--temperature" , default=2.0 , type=SCREAMING_SNAKE_CASE , help="Temperature for the softmax temperature." )
parser.add_argument(
"--alpha_ce" , default=0.5 , type=SCREAMING_SNAKE_CASE , help="Linear weight for the distillation loss. Must be >=0." )
parser.add_argument(
"--alpha_mlm" , default=0.0 , type=SCREAMING_SNAKE_CASE , help="Linear weight for the MLM loss. Must be >=0. Should be used in conjunction with `mlm` flag." , )
parser.add_argument("--alpha_clm" , default=0.5 , type=SCREAMING_SNAKE_CASE , help="Linear weight for the CLM loss. Must be >=0." )
parser.add_argument("--alpha_mse" , default=0.0 , type=SCREAMING_SNAKE_CASE , help="Linear weight of the MSE loss. Must be >=0." )
parser.add_argument(
"--alpha_cos" , default=0.0 , type=SCREAMING_SNAKE_CASE , help="Linear weight of the cosine embedding loss. Must be >=0." )
parser.add_argument(
"--mlm" , action="store_true" , help="The LM step: MLM or CLM. If `mlm` is True, the MLM is used over CLM." )
parser.add_argument(
"--mlm_mask_prop" , default=0.15 , type=SCREAMING_SNAKE_CASE , help="Proportion of tokens for which we need to make a prediction." , )
parser.add_argument("--word_mask" , default=0.8 , type=SCREAMING_SNAKE_CASE , help="Proportion of tokens to mask out." )
parser.add_argument("--word_keep" , default=0.1 , type=SCREAMING_SNAKE_CASE , help="Proportion of tokens to keep." )
parser.add_argument("--word_rand" , default=0.1 , type=SCREAMING_SNAKE_CASE , help="Proportion of tokens to randomly replace." )
parser.add_argument(
"--mlm_smoothing" , default=0.7 , type=SCREAMING_SNAKE_CASE , help="Smoothing parameter to emphasize more rare tokens (see XLM, similar to word2vec)." , )
parser.add_argument("--token_counts" , type=SCREAMING_SNAKE_CASE , help="The token counts in the data_file for MLM." )
parser.add_argument(
"--restrict_ce_to_mask" , action="store_true" , help="If true, compute the distillation loss only the [MLM] prediction distribution." , )
parser.add_argument(
"--freeze_pos_embs" , action="store_true" , help="Freeze positional embeddings during distillation. For student_type in ['roberta', 'gpt2'] only." , )
parser.add_argument(
"--freeze_token_type_embds" , action="store_true" , help="Freeze token type embeddings during distillation if existent. For student_type in ['roberta'] only." , )
parser.add_argument("--n_epoch" , type=SCREAMING_SNAKE_CASE , default=3 , help="Number of pass on the whole dataset." )
parser.add_argument("--batch_size" , type=SCREAMING_SNAKE_CASE , default=5 , help="Batch size (for each process)." )
parser.add_argument(
"--group_by_size" , action="store_false" , help="If true, group sequences that have similar length into the same batch. Default is true." , )
parser.add_argument(
"--gradient_accumulation_steps" , type=SCREAMING_SNAKE_CASE , default=5_0 , help="Gradient accumulation for larger training batches." , )
parser.add_argument("--warmup_prop" , default=0.05 , type=SCREAMING_SNAKE_CASE , help="Linear warmup proportion." )
parser.add_argument("--weight_decay" , default=0.0 , type=SCREAMING_SNAKE_CASE , help="Weight decay if we apply some." )
parser.add_argument("--learning_rate" , default=5E-4 , type=SCREAMING_SNAKE_CASE , help="The initial learning rate for Adam." )
parser.add_argument("--adam_epsilon" , default=1E-6 , type=SCREAMING_SNAKE_CASE , help="Epsilon for Adam optimizer." )
parser.add_argument("--max_grad_norm" , default=5.0 , type=SCREAMING_SNAKE_CASE , help="Max gradient norm." )
parser.add_argument("--initializer_range" , default=0.02 , type=SCREAMING_SNAKE_CASE , help="Random initialization range." )
parser.add_argument(
"--fp16" , action="store_true" , help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit" , )
parser.add_argument(
"--fp16_opt_level" , type=SCREAMING_SNAKE_CASE , default="O1" , help=(
"For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."
"See details at https://nvidia.github.io/apex/amp.html"
) , )
parser.add_argument("--n_gpu" , type=SCREAMING_SNAKE_CASE , default=1 , help="Number of GPUs in the node." )
parser.add_argument("--local_rank" , type=SCREAMING_SNAKE_CASE , default=-1 , help="Distributed training - Local rank" )
parser.add_argument("--seed" , type=SCREAMING_SNAKE_CASE , default=5_6 , help="Random seed" )
parser.add_argument("--log_interval" , type=SCREAMING_SNAKE_CASE , default=5_0_0 , help="Tensorboard logging interval." )
parser.add_argument("--checkpoint_interval" , type=SCREAMING_SNAKE_CASE , default=4_0_0_0 , help="Checkpoint interval." )
lowerCAmelCase : List[str] = parser.parse_args()
sanity_checks(SCREAMING_SNAKE_CASE )
# ARGS #
init_gpu_params(SCREAMING_SNAKE_CASE )
set_seed(SCREAMING_SNAKE_CASE )
if args.is_master:
if os.path.exists(args.dump_path ):
if not args.force:
raise ValueError(
f"""Serialization dir {args.dump_path} already exists, but you have not precised wheter to overwrite"""
" itUse `--force` if you want to overwrite it" )
else:
shutil.rmtree(args.dump_path )
if not os.path.exists(args.dump_path ):
os.makedirs(args.dump_path )
logger.info(f"""Experiment will be dumped and logged in {args.dump_path}""" )
# SAVE PARAMS #
logger.info(f"""Param: {args}""" )
with open(os.path.join(args.dump_path , "parameters.json" ) , "w" ) as f:
json.dump(vars(SCREAMING_SNAKE_CASE ) , SCREAMING_SNAKE_CASE , indent=4 )
git_log(args.dump_path )
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase : List[Any] = MODEL_CLASSES[args.student_type]
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase : Tuple = MODEL_CLASSES[args.teacher_type]
# TOKENIZER #
lowerCAmelCase : List[Any] = teacher_tokenizer_class.from_pretrained(args.teacher_name )
lowerCAmelCase : Union[str, Any] = {}
for tok_name, tok_symbol in tokenizer.special_tokens_map.items():
lowerCAmelCase : Optional[Any] = tokenizer.all_special_tokens.index(SCREAMING_SNAKE_CASE )
lowerCAmelCase : List[str] = tokenizer.all_special_ids[idx]
logger.info(f"""Special tokens {special_tok_ids}""" )
lowerCAmelCase : List[Any] = special_tok_ids
lowerCAmelCase : List[str] = tokenizer.max_model_input_sizes[args.teacher_name]
# DATA LOADER #
logger.info(f"""Loading data from {args.data_file}""" )
with open(args.data_file , "rb" ) as fp:
lowerCAmelCase : List[Any] = pickle.load(SCREAMING_SNAKE_CASE )
if args.mlm:
logger.info(f"""Loading token counts from {args.token_counts} (already pre-computed)""" )
with open(args.token_counts , "rb" ) as fp:
lowerCAmelCase : List[str] = pickle.load(SCREAMING_SNAKE_CASE )
lowerCAmelCase : Optional[int] = np.maximum(SCREAMING_SNAKE_CASE , 1 ) ** -args.mlm_smoothing
for idx in special_tok_ids.values():
lowerCAmelCase : Tuple = 0.0 # do not predict special tokens
lowerCAmelCase : int = torch.from_numpy(SCREAMING_SNAKE_CASE )
else:
lowerCAmelCase : Union[str, Any] = None
lowerCAmelCase : Union[str, Any] = LmSeqsDataset(params=SCREAMING_SNAKE_CASE , data=SCREAMING_SNAKE_CASE )
logger.info("Data loader created." )
# STUDENT #
logger.info(f"""Loading student config from {args.student_config}""" )
lowerCAmelCase : Optional[Any] = student_config_class.from_pretrained(args.student_config )
lowerCAmelCase : str = True
if args.student_pretrained_weights is not None:
logger.info(f"""Loading pretrained weights from {args.student_pretrained_weights}""" )
lowerCAmelCase : List[str] = student_model_class.from_pretrained(args.student_pretrained_weights , config=SCREAMING_SNAKE_CASE )
else:
lowerCAmelCase : int = student_model_class(SCREAMING_SNAKE_CASE )
if args.n_gpu > 0:
student.to(f"""cuda:{args.local_rank}""" )
logger.info("Student loaded." )
# TEACHER #
lowerCAmelCase : Any = teacher_model_class.from_pretrained(args.teacher_name , output_hidden_states=SCREAMING_SNAKE_CASE )
if args.n_gpu > 0:
teacher.to(f"""cuda:{args.local_rank}""" )
logger.info(f"""Teacher loaded from {args.teacher_name}.""" )
# FREEZING #
if args.freeze_pos_embs:
freeze_pos_embeddings(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
if args.freeze_token_type_embds:
freeze_token_type_embeddings(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
# SANITY CHECKS #
assert student.config.vocab_size == teacher.config.vocab_size
assert student.config.hidden_size == teacher.config.hidden_size
assert student.config.max_position_embeddings == teacher.config.max_position_embeddings
if args.mlm:
assert token_probs.size(0 ) == stu_architecture_config.vocab_size
# DISTILLER #
torch.cuda.empty_cache()
lowerCAmelCase : Tuple = Distiller(
params=SCREAMING_SNAKE_CASE , dataset=SCREAMING_SNAKE_CASE , token_probs=SCREAMING_SNAKE_CASE , student=SCREAMING_SNAKE_CASE , teacher=SCREAMING_SNAKE_CASE )
distiller.train()
logger.info("Let's go get some drinks." )
if __name__ == "__main__":
main()
| 681 |
"""simple docstring"""
from __future__ import annotations
import inspect
import unittest
import numpy as np
from transformers import ResNetConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFResNetForImageClassification, TFResNetModel
from transformers.models.resnet.modeling_tf_resnet import TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class SCREAMING_SNAKE_CASE__ :
"""simple docstring"""
def __init__( self , snake_case__ , snake_case__=3 , snake_case__=32 , snake_case__=3 , snake_case__=10 , snake_case__=[10, 20, 30, 40] , snake_case__=[1, 1, 2, 1] , snake_case__=True , snake_case__=True , snake_case__="relu" , snake_case__=3 , snake_case__=None , ):
"""simple docstring"""
lowerCAmelCase : Optional[Any] = parent
lowerCAmelCase : List[Any] = batch_size
lowerCAmelCase : Union[str, Any] = image_size
lowerCAmelCase : Dict = num_channels
lowerCAmelCase : List[Any] = embeddings_size
lowerCAmelCase : List[Any] = hidden_sizes
lowerCAmelCase : Optional[int] = depths
lowerCAmelCase : str = is_training
lowerCAmelCase : List[str] = use_labels
lowerCAmelCase : List[Any] = hidden_act
lowerCAmelCase : Optional[Any] = num_labels
lowerCAmelCase : Tuple = scope
lowerCAmelCase : int = len(snake_case__ )
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : Optional[int] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowerCAmelCase : Optional[Any] = None
if self.use_labels:
lowerCAmelCase : Optional[int] = ids_tensor([self.batch_size] , self.num_labels )
lowerCAmelCase : List[str] = self.get_config()
return config, pixel_values, labels
def lowercase__ ( self ):
"""simple docstring"""
return ResNetConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , image_size=self.image_size , )
def lowercase__ ( self , snake_case__ , snake_case__ , snake_case__ ):
"""simple docstring"""
lowerCAmelCase : Tuple = TFResNetModel(config=snake_case__ )
lowerCAmelCase : Union[str, Any] = model(snake_case__ )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def lowercase__ ( self , snake_case__ , snake_case__ , snake_case__ ):
"""simple docstring"""
lowerCAmelCase : Dict = self.num_labels
lowerCAmelCase : str = TFResNetForImageClassification(snake_case__ )
lowerCAmelCase : int = model(snake_case__ , labels=snake_case__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : Union[str, Any] = self.prepare_config_and_inputs()
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase : Any = config_and_inputs
lowerCAmelCase : Optional[Any] = {"pixel_values": pixel_values}
return config, inputs_dict
@require_tf
class SCREAMING_SNAKE_CASE__ ( lowercase , lowercase , unittest.TestCase ):
"""simple docstring"""
a : Any =(TFResNetModel, TFResNetForImageClassification) if is_tf_available() else ()
a : Tuple =(
{"feature-extraction": TFResNetModel, "image-classification": TFResNetForImageClassification}
if is_tf_available()
else {}
)
a : int =False
a : List[str] =False
a : Optional[int] =False
a : Union[str, Any] =False
a : Any =False
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : Optional[Any] = TFResNetModelTester(self )
lowerCAmelCase : str = ConfigTester(self , config_class=snake_case__ , has_text_modality=snake_case__ )
def lowercase__ ( self ):
"""simple docstring"""
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def lowercase__ ( self ):
"""simple docstring"""
return
@unittest.skip(reason="ResNet does not use inputs_embeds" )
def lowercase__ ( self ):
"""simple docstring"""
pass
@unittest.skip(reason="ResNet does not support input and output embeddings" )
def lowercase__ ( self ):
"""simple docstring"""
pass
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase , lowerCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCAmelCase : List[str] = model_class(snake_case__ )
lowerCAmelCase : Optional[Any] = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCAmelCase : Dict = [*signature.parameters.keys()]
lowerCAmelCase : List[Any] = ["pixel_values"]
self.assertListEqual(arg_names[:1] , snake_case__ )
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*snake_case__ )
def lowercase__ ( self ):
"""simple docstring"""
def check_hidden_states_output(snake_case__ , snake_case__ , snake_case__ ):
lowerCAmelCase : int = model_class(snake_case__ )
lowerCAmelCase : Optional[Any] = model(**self._prepare_for_class(snake_case__ , snake_case__ ) )
lowerCAmelCase : List[str] = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
lowerCAmelCase : Tuple = self.model_tester.num_stages
self.assertEqual(len(snake_case__ ) , expected_num_stages + 1 )
# ResNet's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
lowerCAmelCase , lowerCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
lowerCAmelCase : Any = ["basic", "bottleneck"]
for model_class in self.all_model_classes:
for layer_type in layers_type:
lowerCAmelCase : Optional[Any] = layer_type
lowerCAmelCase : Dict = True
check_hidden_states_output(snake_case__ , snake_case__ , snake_case__ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowerCAmelCase : List[Any] = True
check_hidden_states_output(snake_case__ , snake_case__ , snake_case__ )
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*snake_case__ )
@slow
def lowercase__ ( self ):
"""simple docstring"""
for model_name in TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCAmelCase : int = TFResNetModel.from_pretrained(snake_case__ )
self.assertIsNotNone(snake_case__ )
def a__ ( ):
'''simple docstring'''
lowerCAmelCase : str = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_tf
@require_vision
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def lowercase__ ( self ):
"""simple docstring"""
return (
AutoImageProcessor.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
if is_vision_available()
else None
)
@slow
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : int = TFResNetForImageClassification.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
lowerCAmelCase : Any = self.default_image_processor
lowerCAmelCase : Optional[Any] = prepare_img()
lowerCAmelCase : Dict = image_processor(images=snake_case__ , return_tensors="tf" )
# forward pass
lowerCAmelCase : str = model(**snake_case__ )
# verify the logits
lowerCAmelCase : str = tf.TensorShape((1, 1_000) )
self.assertEqual(outputs.logits.shape , snake_case__ )
lowerCAmelCase : str = tf.constant([-11.1069, -9.7877, -8.3777] )
self.assertTrue(np.allclose(outputs.logits[0, :3].numpy() , snake_case__ , atol=1e-4 ) )
| 681 | 1 |
"""simple docstring"""
import argparse
import os
from . import (
ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
BART_PRETRAINED_MODEL_ARCHIVE_LIST,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP,
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP,
FLAUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST,
LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP,
ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
T5_PRETRAINED_CONFIG_ARCHIVE_MAP,
TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP,
WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLM_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
AlbertConfig,
BartConfig,
BertConfig,
CamembertConfig,
CTRLConfig,
DistilBertConfig,
DPRConfig,
ElectraConfig,
FlaubertConfig,
GPTaConfig,
LayoutLMConfig,
LxmertConfig,
OpenAIGPTConfig,
RobertaConfig,
TaConfig,
TFAlbertForPreTraining,
TFBartForConditionalGeneration,
TFBartForSequenceClassification,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFCamembertForMaskedLM,
TFCTRLLMHeadModel,
TFDistilBertForMaskedLM,
TFDistilBertForQuestionAnswering,
TFDPRContextEncoder,
TFDPRQuestionEncoder,
TFDPRReader,
TFElectraForPreTraining,
TFFlaubertWithLMHeadModel,
TFGPTaLMHeadModel,
TFLayoutLMForMaskedLM,
TFLxmertForPreTraining,
TFLxmertVisualFeatureEncoder,
TFOpenAIGPTLMHeadModel,
TFRobertaForCausalLM,
TFRobertaForMaskedLM,
TFRobertaForSequenceClassification,
TFTaForConditionalGeneration,
TFTransfoXLLMHeadModel,
TFWavaVecaModel,
TFXLMRobertaForMaskedLM,
TFXLMWithLMHeadModel,
TFXLNetLMHeadModel,
TransfoXLConfig,
WavaVecaConfig,
WavaVecaModel,
XLMConfig,
XLMRobertaConfig,
XLNetConfig,
is_torch_available,
load_pytorch_checkpoint_in_tfa_model,
)
from .utils import CONFIG_NAME, WEIGHTS_NAME, cached_file, logging
if is_torch_available():
import numpy as np
import torch
from . import (
AlbertForPreTraining,
BartForConditionalGeneration,
BertForPreTraining,
BertForQuestionAnswering,
BertForSequenceClassification,
CamembertForMaskedLM,
CTRLLMHeadModel,
DistilBertForMaskedLM,
DistilBertForQuestionAnswering,
DPRContextEncoder,
DPRQuestionEncoder,
DPRReader,
ElectraForPreTraining,
FlaubertWithLMHeadModel,
GPTaLMHeadModel,
LayoutLMForMaskedLM,
LxmertForPreTraining,
LxmertVisualFeatureEncoder,
OpenAIGPTLMHeadModel,
RobertaForMaskedLM,
RobertaForSequenceClassification,
TaForConditionalGeneration,
TransfoXLLMHeadModel,
XLMRobertaForMaskedLM,
XLMWithLMHeadModel,
XLNetLMHeadModel,
)
logging.set_verbosity_info()
lowerCAmelCase__ = {
'''bart''': (
BartConfig,
TFBartForConditionalGeneration,
TFBartForSequenceClassification,
BartForConditionalGeneration,
BART_PRETRAINED_MODEL_ARCHIVE_LIST,
),
'''bert''': (
BertConfig,
TFBertForPreTraining,
BertForPreTraining,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''bert-large-uncased-whole-word-masking-finetuned-squad''': (
BertConfig,
TFBertForQuestionAnswering,
BertForQuestionAnswering,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''bert-large-cased-whole-word-masking-finetuned-squad''': (
BertConfig,
TFBertForQuestionAnswering,
BertForQuestionAnswering,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''bert-base-cased-finetuned-mrpc''': (
BertConfig,
TFBertForSequenceClassification,
BertForSequenceClassification,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''dpr''': (
DPRConfig,
TFDPRQuestionEncoder,
TFDPRContextEncoder,
TFDPRReader,
DPRQuestionEncoder,
DPRContextEncoder,
DPRReader,
DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
),
'''gpt2''': (
GPTaConfig,
TFGPTaLMHeadModel,
GPTaLMHeadModel,
GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''xlnet''': (
XLNetConfig,
TFXLNetLMHeadModel,
XLNetLMHeadModel,
XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''xlm''': (
XLMConfig,
TFXLMWithLMHeadModel,
XLMWithLMHeadModel,
XLM_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''xlm-roberta''': (
XLMRobertaConfig,
TFXLMRobertaForMaskedLM,
XLMRobertaForMaskedLM,
XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''transfo-xl''': (
TransfoXLConfig,
TFTransfoXLLMHeadModel,
TransfoXLLMHeadModel,
TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''openai-gpt''': (
OpenAIGPTConfig,
TFOpenAIGPTLMHeadModel,
OpenAIGPTLMHeadModel,
OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''roberta''': (
RobertaConfig,
TFRobertaForCausalLM,
TFRobertaForMaskedLM,
RobertaForMaskedLM,
ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''layoutlm''': (
LayoutLMConfig,
TFLayoutLMForMaskedLM,
LayoutLMForMaskedLM,
LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST,
),
'''roberta-large-mnli''': (
RobertaConfig,
TFRobertaForSequenceClassification,
RobertaForSequenceClassification,
ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''camembert''': (
CamembertConfig,
TFCamembertForMaskedLM,
CamembertForMaskedLM,
CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''flaubert''': (
FlaubertConfig,
TFFlaubertWithLMHeadModel,
FlaubertWithLMHeadModel,
FLAUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''distilbert''': (
DistilBertConfig,
TFDistilBertForMaskedLM,
DistilBertForMaskedLM,
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''distilbert-base-distilled-squad''': (
DistilBertConfig,
TFDistilBertForQuestionAnswering,
DistilBertForQuestionAnswering,
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''lxmert''': (
LxmertConfig,
TFLxmertForPreTraining,
LxmertForPreTraining,
LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''lxmert-visual-feature-encoder''': (
LxmertConfig,
TFLxmertVisualFeatureEncoder,
LxmertVisualFeatureEncoder,
LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''ctrl''': (
CTRLConfig,
TFCTRLLMHeadModel,
CTRLLMHeadModel,
CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''albert''': (
AlbertConfig,
TFAlbertForPreTraining,
AlbertForPreTraining,
ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''t5''': (
TaConfig,
TFTaForConditionalGeneration,
TaForConditionalGeneration,
T5_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''electra''': (
ElectraConfig,
TFElectraForPreTraining,
ElectraForPreTraining,
ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''wav2vec2''': (
WavaVecaConfig,
TFWavaVecaModel,
WavaVecaModel,
WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
}
def a__ ( SCREAMING_SNAKE_CASE : Tuple , SCREAMING_SNAKE_CASE : Dict , SCREAMING_SNAKE_CASE : Dict , SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : Optional[Any]=False , SCREAMING_SNAKE_CASE : List[str]=True ):
'''simple docstring'''
if model_type not in MODEL_CLASSES:
raise ValueError(f"""Unrecognized model type, should be one of {list(MODEL_CLASSES.keys() )}.""" )
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase : Tuple = MODEL_CLASSES[model_type]
# Initialise TF model
if config_file in aws_config_map:
lowerCAmelCase : List[Any] = cached_file(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , force_download=not use_cached_models )
lowerCAmelCase : Optional[int] = config_class.from_json_file(SCREAMING_SNAKE_CASE )
lowerCAmelCase : Tuple = True
lowerCAmelCase : Optional[Any] = True
print(f"""Building TensorFlow model from configuration: {config}""" )
lowerCAmelCase : str = model_class(SCREAMING_SNAKE_CASE )
    # Resolve the PyTorch checkpoint (download it first if a shortcut name was given)
if pytorch_checkpoint_path in aws_config_map.keys():
lowerCAmelCase : List[Any] = cached_file(
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , force_download=not use_cached_models )
# Load PyTorch checkpoint in tf2 model:
lowerCAmelCase : Tuple = load_pytorch_checkpoint_in_tfa_model(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
if compare_with_pt_model:
lowerCAmelCase : List[str] = tf_model(tf_model.dummy_inputs , training=SCREAMING_SNAKE_CASE ) # build the network
lowerCAmelCase : Any = torch.load(SCREAMING_SNAKE_CASE , map_location="cpu" )
lowerCAmelCase : List[Any] = pt_model_class.from_pretrained(
pretrained_model_name_or_path=SCREAMING_SNAKE_CASE , config=SCREAMING_SNAKE_CASE , state_dict=SCREAMING_SNAKE_CASE )
with torch.no_grad():
lowerCAmelCase : Dict = pt_model(**pt_model.dummy_inputs )
lowerCAmelCase : List[Any] = pto[0].numpy()
lowerCAmelCase : List[str] = tfo[0].numpy()
lowerCAmelCase : List[str] = np.amax(np.abs(np_pt - np_tf ) )
print(f"""Max absolute difference between models outputs {diff}""" )
assert diff <= 2E-2, f"""Error, model absolute difference is >2e-2: {diff}"""
    # Save the TensorFlow model
print(f"""Save TensorFlow model to {tf_dump_path}""" )
tf_model.save_weights(SCREAMING_SNAKE_CASE , save_format="h5" )
def a__ ( SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : Optional[int] , SCREAMING_SNAKE_CASE : List[str]=None , SCREAMING_SNAKE_CASE : Union[str, Any]=None , SCREAMING_SNAKE_CASE : str=False , SCREAMING_SNAKE_CASE : Tuple=False , SCREAMING_SNAKE_CASE : Optional[int]=False , SCREAMING_SNAKE_CASE : Any=False , ):
'''simple docstring'''
if args_model_type is None:
lowerCAmelCase : Tuple = list(MODEL_CLASSES.keys() )
else:
lowerCAmelCase : int = [args_model_type]
for j, model_type in enumerate(SCREAMING_SNAKE_CASE , start=1 ):
print("=" * 1_0_0 )
print(f""" Converting model type {j}/{len(SCREAMING_SNAKE_CASE )}: {model_type}""" )
print("=" * 1_0_0 )
if model_type not in MODEL_CLASSES:
raise ValueError(f"""Unrecognized model type {model_type}, should be one of {list(MODEL_CLASSES.keys() )}.""" )
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase : List[Any] = MODEL_CLASSES[model_type]
if model_shortcut_names_or_path is None:
lowerCAmelCase : List[str] = list(aws_model_maps.keys() )
if config_shortcut_names_or_path is None:
lowerCAmelCase : Union[str, Any] = model_shortcut_names_or_path
for i, (model_shortcut_name, config_shortcut_name) in enumerate(
zip(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) , start=1 ):
print("-" * 1_0_0 )
if "-squad" in model_shortcut_name or "-mrpc" in model_shortcut_name or "-mnli" in model_shortcut_name:
if not only_convert_finetuned_models:
print(f""" Skipping finetuned checkpoint {model_shortcut_name}""" )
continue
lowerCAmelCase : Tuple = model_shortcut_name
elif only_convert_finetuned_models:
print(f""" Skipping not finetuned checkpoint {model_shortcut_name}""" )
continue
print(
f""" Converting checkpoint {i}/{len(SCREAMING_SNAKE_CASE )}: {model_shortcut_name} - model_type {model_type}""" )
print("-" * 1_0_0 )
if config_shortcut_name in aws_config_map:
lowerCAmelCase : Dict = cached_file(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , force_download=not use_cached_models )
else:
lowerCAmelCase : Any = config_shortcut_name
if model_shortcut_name in aws_model_maps:
lowerCAmelCase : List[str] = cached_file(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , force_download=not use_cached_models )
else:
lowerCAmelCase : List[str] = model_shortcut_name
if os.path.isfile(SCREAMING_SNAKE_CASE ):
lowerCAmelCase : str = "converted_model"
convert_pt_checkpoint_to_tf(
model_type=SCREAMING_SNAKE_CASE , pytorch_checkpoint_path=SCREAMING_SNAKE_CASE , config_file=SCREAMING_SNAKE_CASE , tf_dump_path=os.path.join(SCREAMING_SNAKE_CASE , model_shortcut_name + "-tf_model.h5" ) , compare_with_pt_model=SCREAMING_SNAKE_CASE , )
if remove_cached_files:
os.remove(SCREAMING_SNAKE_CASE )
os.remove(SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
lowerCAmelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--tf_dump_path''', default=None, type=str, required=True, help='''Path to the output Tensorflow dump file.'''
)
parser.add_argument(
'''--model_type''',
default=None,
type=str,
help=(
F"Model type selected in the list of {list(MODEL_CLASSES.keys())}. If not given, will download and "
'''convert all the models from AWS.'''
),
)
parser.add_argument(
'''--pytorch_checkpoint_path''',
default=None,
type=str,
help=(
'''Path to the PyTorch checkpoint path or shortcut name to download from AWS. '''
'''If not given, will download and convert all the checkpoints from AWS.'''
),
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
help=(
'''The config json file corresponding to the pre-trained model. \n'''
'''This specifies the model architecture. If not given and '''
        '''--pytorch_checkpoint_path is not given or is a shortcut name, '''
        '''use the configuration associated with the shortcut name on AWS.'''
),
)
parser.add_argument(
'''--compare_with_pt_model''', action='''store_true''', help='''Compare Tensorflow and PyTorch model predictions.'''
)
parser.add_argument(
'''--use_cached_models''',
action='''store_true''',
help='''Use cached models if possible instead of updating to latest checkpoint versions.''',
)
parser.add_argument(
'''--remove_cached_files''',
action='''store_true''',
help='''Remove pytorch models after conversion (save memory when converting in batches).''',
)
parser.add_argument('''--only_convert_finetuned_models''', action='''store_true''', help='''Only convert finetuned models.''')
lowerCAmelCase__ = parser.parse_args()
# if args.pytorch_checkpoint_path is not None:
# convert_pt_checkpoint_to_tf(args.model_type.lower(),
# args.pytorch_checkpoint_path,
# args.config_file if args.config_file is not None else args.pytorch_checkpoint_path,
# args.tf_dump_path,
# compare_with_pt_model=args.compare_with_pt_model,
# use_cached_models=args.use_cached_models)
# else:
convert_all_pt_checkpoints_to_tf(
args.model_type.lower() if args.model_type is not None else None,
args.tf_dump_path,
model_shortcut_names_or_path=[args.pytorch_checkpoint_path]
if args.pytorch_checkpoint_path is not None
else None,
config_shortcut_names_or_path=[args.config_file] if args.config_file is not None else None,
compare_with_pt_model=args.compare_with_pt_model,
use_cached_models=args.use_cached_models,
remove_cached_files=args.remove_cached_files,
only_convert_finetuned_models=args.only_convert_finetuned_models,
)
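# Example invocation (a sketch; the script filename and paths are hypothetical,
# the flags are the ones defined above):
#   python convert_pytorch_checkpoint_to_tf2.py --model_type bert \
#       --pytorch_checkpoint_path bert-base-uncased \
#       --config_file bert-base-uncased \
#       --tf_dump_path ./tf_dumps --compare_with_pt_model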
| 681 |
"""simple docstring"""
import random
from .binary_exp_mod import bin_exp_mod
def a__ ( SCREAMING_SNAKE_CASE : Any , SCREAMING_SNAKE_CASE : int=1_0_0_0 ):
'''simple docstring'''
if n < 2:
return False
if n % 2 == 0:
return n == 2
# this means n is odd
lowerCAmelCase : int = n - 1
lowerCAmelCase : Optional[int] = 0
while d % 2 == 0:
        d //= 2  # integer division so d stays an int for bin_exp_mod
        exp += 1
    # n - 1 = d * (2**exp)
lowerCAmelCase : Optional[Any] = 0
while count < prec:
lowerCAmelCase : List[str] = random.randint(2 , n - 1 )
lowerCAmelCase : Tuple = bin_exp_mod(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
if b != 1:
lowerCAmelCase : List[str] = True
for _ in range(SCREAMING_SNAKE_CASE ):
if b == n - 1:
lowerCAmelCase : List[str] = False
break
lowerCAmelCase : Optional[Any] = b * b
b %= n
if flag:
return False
count += 1
return True
if __name__ == "__main__":
lowerCAmelCase__ = abs(int(input('''Enter bound : ''').strip()))
print('''Here\'s the list of primes:''')
print(''', '''.join(str(i) for i in range(n + 1) if is_prime_big(i)))
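# A self-contained sanity sketch (not part of the original script): simple
# trial division agrees with the probabilistic Miller-Rabin test on small inputs.
def _trial_division(k: int) -> bool:
    return k >= 2 and all(k % i for i in range(2, int(k**0.5) + 1))
assert sum(_trial_division(i) for i in range(2, 50)) == 15  # 15 primes below 50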
| 681 | 1 |
"""simple docstring"""
import logging
from dataclasses import dataclass, field
from typing import Optional
from seqaseq_trainer import arg_to_scheduler
from transformers import TrainingArguments
lowerCAmelCase__ = logging.getLogger(__name__)
@dataclass
class SCREAMING_SNAKE_CASE__ ( lowercase ):
"""simple docstring"""
a : Optional[float] =field(
default=0.0 , metadata={"help": "The label smoothing epsilon to apply (if not zero)."} )
a : bool =field(default=lowercase , metadata={"help": "Whether to SortishSamler or not."} )
a : bool =field(
default=lowercase , metadata={"help": "Whether to use generate to calculate generative metrics (ROUGE, BLEU)."} )
a : bool =field(default=lowercase , metadata={"help": "whether to use adafactor"} )
a : Optional[float] =field(
default=lowercase , metadata={"help": "Encoder layer dropout probability. Goes into model.config."} )
a : Optional[float] =field(
default=lowercase , metadata={"help": "Decoder layer dropout probability. Goes into model.config."} )
a : Optional[float] =field(default=lowercase , metadata={"help": "Dropout probability. Goes into model.config."} )
a : Optional[float] =field(
default=lowercase , metadata={"help": "Attention dropout probability. Goes into model.config."} )
a : Optional[str] =field(
default="linear" , metadata={"help": f"""Which lr scheduler to use. Selected in {sorted(arg_to_scheduler.keys() )}"""} , )
| 681 |
"""simple docstring"""
# DISCLAIMER: This file is strongly influenced by https://github.com/ermongroup/ddim
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import flax
import jax
import jax.numpy as jnp
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils_flax import (
CommonSchedulerState,
FlaxKarrasDiffusionSchedulers,
FlaxSchedulerMixin,
FlaxSchedulerOutput,
add_noise_common,
get_velocity_common,
)
@flax.struct.dataclass
class SCREAMING_SNAKE_CASE__ :
"""simple docstring"""
a : CommonSchedulerState
# setable values
a : jnp.ndarray
a : jnp.ndarray
a : Optional[int] =None
@classmethod
def lowercase__ ( cls , snake_case__ , snake_case__ , snake_case__ ):
"""simple docstring"""
return cls(common=snake_case__ , init_noise_sigma=snake_case__ , timesteps=snake_case__ )
@dataclass
class SCREAMING_SNAKE_CASE__ ( lowercase ):
"""simple docstring"""
a : DDPMSchedulerState
class SCREAMING_SNAKE_CASE__ ( lowercase , lowercase ):
"""simple docstring"""
a : Union[str, Any] =[e.name for e in FlaxKarrasDiffusionSchedulers]
a : jnp.dtype
@property
def lowercase__ ( self ):
"""simple docstring"""
return True
@register_to_config
def __init__( self , snake_case__ = 1_000 , snake_case__ = 0.0001 , snake_case__ = 0.02 , snake_case__ = "linear" , snake_case__ = None , snake_case__ = "fixed_small" , snake_case__ = True , snake_case__ = "epsilon" , snake_case__ = jnp.floataa , ):
"""simple docstring"""
lowerCAmelCase : Any = dtype
def lowercase__ ( self , snake_case__ = None ):
"""simple docstring"""
if common is None:
lowerCAmelCase : Dict = CommonSchedulerState.create(self )
# standard deviation of the initial noise distribution
lowerCAmelCase : str = jnp.array(1.0 , dtype=self.dtype )
lowerCAmelCase : Any = jnp.arange(0 , self.config.num_train_timesteps ).round()[::-1]
return DDPMSchedulerState.create(
common=snake_case__ , init_noise_sigma=snake_case__ , timesteps=snake_case__ , )
def lowercase__ ( self , snake_case__ , snake_case__ , snake_case__ = None ):
"""simple docstring"""
return sample
def lowercase__ ( self , snake_case__ , snake_case__ , snake_case__ = () ):
"""simple docstring"""
lowerCAmelCase : List[Any] = self.config.num_train_timesteps // num_inference_steps
# creates integer timesteps by multiplying by ratio
# rounding to avoid issues when num_inference_step is power of 3
lowerCAmelCase : Any = (jnp.arange(0 , snake_case__ ) * step_ratio).round()[::-1]
return state.replace(
num_inference_steps=snake_case__ , timesteps=snake_case__ , )
def lowercase__ ( self , snake_case__ , snake_case__ , snake_case__=None , snake_case__=None ):
"""simple docstring"""
lowerCAmelCase : Optional[Any] = state.common.alphas_cumprod[t]
lowerCAmelCase : List[str] = jnp.where(t > 0 , state.common.alphas_cumprod[t - 1] , jnp.array(1.0 , dtype=self.dtype ) )
# For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf)
# and sample from it to get previous sample
# x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample
lowerCAmelCase : Union[str, Any] = (1 - alpha_prod_t_prev) / (1 - alpha_prod_t) * state.common.betas[t]
if variance_type is None:
lowerCAmelCase : List[str] = self.config.variance_type
# hacks - were probably added for training stability
if variance_type == "fixed_small":
lowerCAmelCase : List[Any] = jnp.clip(snake_case__ , a_min=1e-20 )
# for rl-diffuser https://arxiv.org/abs/2205.09991
elif variance_type == "fixed_small_log":
lowerCAmelCase : List[str] = jnp.log(jnp.clip(snake_case__ , a_min=1e-20 ) )
elif variance_type == "fixed_large":
lowerCAmelCase : Optional[int] = state.common.betas[t]
elif variance_type == "fixed_large_log":
# Glide max_log
lowerCAmelCase : List[str] = jnp.log(state.common.betas[t] )
elif variance_type == "learned":
return predicted_variance
elif variance_type == "learned_range":
lowerCAmelCase : List[str] = variance
lowerCAmelCase : Dict = state.common.betas[t]
lowerCAmelCase : Optional[Any] = (predicted_variance + 1) / 2
lowerCAmelCase : List[str] = frac * max_log + (1 - frac) * min_log
return variance
def lowercase__ ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ = None , snake_case__ = True , ):
"""simple docstring"""
lowerCAmelCase : Optional[Any] = timestep
if key is None:
lowerCAmelCase : Tuple = jax.random.PRNGKey(0 )
if model_output.shape[1] == sample.shape[1] * 2 and self.config.variance_type in ["learned", "learned_range"]:
lowerCAmelCase , lowerCAmelCase : Optional[Any] = jnp.split(snake_case__ , sample.shape[1] , axis=1 )
else:
lowerCAmelCase : Tuple = None
# 1. compute alphas, betas
lowerCAmelCase : Optional[int] = state.common.alphas_cumprod[t]
lowerCAmelCase : Optional[int] = jnp.where(t > 0 , state.common.alphas_cumprod[t - 1] , jnp.array(1.0 , dtype=self.dtype ) )
lowerCAmelCase : Dict = 1 - alpha_prod_t
lowerCAmelCase : Any = 1 - alpha_prod_t_prev
# 2. compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
if self.config.prediction_type == "epsilon":
lowerCAmelCase : Union[str, Any] = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
elif self.config.prediction_type == "sample":
lowerCAmelCase : List[Any] = model_output
elif self.config.prediction_type == "v_prediction":
lowerCAmelCase : Tuple = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output
else:
raise ValueError(
f"""prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample` """
" for the FlaxDDPMScheduler." )
# 3. Clip "predicted x_0"
if self.config.clip_sample:
lowerCAmelCase : Optional[int] = jnp.clip(snake_case__ , -1 , 1 )
# 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
lowerCAmelCase : List[Any] = (alpha_prod_t_prev ** 0.5 * state.common.betas[t]) / beta_prod_t
lowerCAmelCase : List[str] = state.common.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t
# 5. Compute predicted previous sample µ_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
lowerCAmelCase : int = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample
# 6. Add noise
def random_variance():
lowerCAmelCase : Tuple = jax.random.split(snake_case__ , num=1 )
lowerCAmelCase : str = jax.random.normal(snake_case__ , shape=model_output.shape , dtype=self.dtype )
return (self._get_variance(snake_case__ , snake_case__ , predicted_variance=snake_case__ ) ** 0.5) * noise
lowerCAmelCase : Union[str, Any] = jnp.where(t > 0 , random_variance() , jnp.zeros(model_output.shape , dtype=self.dtype ) )
lowerCAmelCase : Union[str, Any] = pred_prev_sample + variance
if not return_dict:
return (pred_prev_sample, state)
return FlaxDDPMSchedulerOutput(prev_sample=snake_case__ , state=snake_case__ )
def lowercase__ ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , ):
"""simple docstring"""
return add_noise_common(state.common , snake_case__ , snake_case__ , snake_case__ )
def lowercase__ ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , ):
"""simple docstring"""
return get_velocity_common(state.common , snake_case__ , snake_case__ , snake_case__ )
def __len__( self ):
"""simple docstring"""
return self.config.num_train_timesteps
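# Typical Flax driver loop (a sketch; method and variable names are
# illustrative, and state is threaded through explicitly because the scheduler
# itself is stateless):
#   state = scheduler.create_state()
#   state = scheduler.set_timesteps(state, num_inference_steps)
#   for t in state.timesteps:
#       out = scheduler.step(state, unet_apply(sample, t), t, sample, key)
#       sample, state = out.prev_sample, out.state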
| 681 | 1 |
"""simple docstring"""
from .glue import glue_convert_examples_to_features, glue_output_modes, glue_processors, glue_tasks_num_labels
from .squad import SquadExample, SquadFeatures, SquadVaProcessor, squad_convert_examples_to_features
from .utils import DataProcessor, InputExample, InputFeatures, SingleSentenceClassificationProcessor
from .xnli import xnli_output_modes, xnli_processors, xnli_tasks_num_labels
| 681 |
"""simple docstring"""
import argparse
from omegaconf import OmegaConf
import torch
from diffusers import DDIMScheduler, LDMPipeline, UNetLDMModel, VQModel
def a__ ( SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : Tuple , SCREAMING_SNAKE_CASE : Optional[int] ):
'''simple docstring'''
lowerCAmelCase : Tuple = OmegaConf.load(SCREAMING_SNAKE_CASE )
lowerCAmelCase : Union[str, Any] = torch.load(SCREAMING_SNAKE_CASE , map_location="cpu" )["model"]
lowerCAmelCase : int = list(state_dict.keys() )
# extract state_dict for VQVAE
lowerCAmelCase : Tuple = {}
lowerCAmelCase : Dict = "first_stage_model."
for key in keys:
if key.startswith(SCREAMING_SNAKE_CASE ):
lowerCAmelCase : List[str] = state_dict[key]
# extract state_dict for UNetLDM
lowerCAmelCase : List[Any] = {}
lowerCAmelCase : Tuple = "model.diffusion_model."
for key in keys:
if key.startswith(SCREAMING_SNAKE_CASE ):
lowerCAmelCase : str = state_dict[key]
lowerCAmelCase : List[str] = config.model.params.first_stage_config.params
lowerCAmelCase : List[Any] = config.model.params.unet_config.params
lowerCAmelCase : Union[str, Any] = VQModel(**SCREAMING_SNAKE_CASE ).eval()
vqvae.load_state_dict(SCREAMING_SNAKE_CASE )
lowerCAmelCase : Union[str, Any] = UNetLDMModel(**SCREAMING_SNAKE_CASE ).eval()
unet.load_state_dict(SCREAMING_SNAKE_CASE )
lowerCAmelCase : Optional[Any] = DDIMScheduler(
timesteps=config.model.params.timesteps , beta_schedule="scaled_linear" , beta_start=config.model.params.linear_start , beta_end=config.model.params.linear_end , clip_sample=SCREAMING_SNAKE_CASE , )
lowerCAmelCase : Tuple = LDMPipeline(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
pipeline.save_pretrained(SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
lowerCAmelCase__ = argparse.ArgumentParser()
parser.add_argument('''--checkpoint_path''', type=str, required=True)
parser.add_argument('''--config_path''', type=str, required=True)
parser.add_argument('''--output_path''', type=str, required=True)
lowerCAmelCase__ = parser.parse_args()
convert_ldm_original(args.checkpoint_path, args.config_path, args.output_path)
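# Example invocation (a sketch; the script filename and paths are hypothetical,
# the flags are the ones defined above):
#   python convert_ldm_checkpoint.py --checkpoint_path model.ckpt \
#       --config_path config.yaml --output_path ./ldm_pipeline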
| 681 | 1 |
"""simple docstring"""
from __future__ import annotations
import string
from itertools import cycle, product
from pathlib import Path
lowerCAmelCase__ = (
string.ascii_letters + string.digits + string.punctuation + string.whitespace
)
lowerCAmelCase__ = [ord(letter) for letter in string.ascii_lowercase]
lowerCAmelCase__ = {ord(char) for char in VALID_CHARS}
lowerCAmelCase__ = ["the", "be", "to", "of", "and", "in", "that", "have"]
def a__ ( SCREAMING_SNAKE_CASE : list[int] , SCREAMING_SNAKE_CASE : tuple[int, ...] ):
'''simple docstring'''
lowerCAmelCase : str = ""
lowerCAmelCase : int
lowerCAmelCase : int
lowerCAmelCase : int
for keychar, cipherchar in zip(cycle(SCREAMING_SNAKE_CASE ) , SCREAMING_SNAKE_CASE ):
lowerCAmelCase : List[Any] = cipherchar ^ keychar
if decodedchar not in VALID_INTS:
return None
decoded += chr(SCREAMING_SNAKE_CASE )
return decoded
def a__ ( SCREAMING_SNAKE_CASE : list[int] ):
'''simple docstring'''
lowerCAmelCase : list[str] = []
for key in product(SCREAMING_SNAKE_CASE , repeat=3 ):
lowerCAmelCase : int = try_key(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
if encoded is not None:
possibles.append(SCREAMING_SNAKE_CASE )
return possibles
def a__ ( SCREAMING_SNAKE_CASE : list[str] , SCREAMING_SNAKE_CASE : str ):
'''simple docstring'''
return [possible for possible in possibles if common_word in possible.lower()]
def a__ ( SCREAMING_SNAKE_CASE : str = "p059_cipher.txt" ):
'''simple docstring'''
lowerCAmelCase : list[int]
lowerCAmelCase : list[str]
lowerCAmelCase : str
lowerCAmelCase : str
lowerCAmelCase : str = Path(SCREAMING_SNAKE_CASE ).parent.joinpath(SCREAMING_SNAKE_CASE ).read_text(encoding="utf-8" )
lowerCAmelCase : List[str] = [int(SCREAMING_SNAKE_CASE ) for number in data.strip().split("," )]
lowerCAmelCase : List[str] = filter_valid_chars(SCREAMING_SNAKE_CASE )
for common_word in COMMON_WORDS:
lowerCAmelCase : Dict = filter_common_word(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
if len(SCREAMING_SNAKE_CASE ) == 1:
break
lowerCAmelCase : Tuple = possibles[0]
return sum(ord(SCREAMING_SNAKE_CASE ) for char in decoded_text )
if __name__ == "__main__":
print(F"{solution() = }")
| 681 |
"""simple docstring"""
def a__ ( SCREAMING_SNAKE_CASE : int = 1_0_0_0 ):
'''simple docstring'''
return sum(e for e in range(3 , SCREAMING_SNAKE_CASE ) if e % 3 == 0 or e % 5 == 0 )
if __name__ == "__main__":
print(F"{solution() = }")
| 681 | 1 |
"""simple docstring"""
import argparse
import pickle
import numpy as np
import torch
from torch import nn
from transformers import ReformerConfig, ReformerModelWithLMHead
from transformers.utils import logging
logging.set_verbosity_info()
def a__ ( SCREAMING_SNAKE_CASE : List[Any] , SCREAMING_SNAKE_CASE : Optional[Any] , SCREAMING_SNAKE_CASE : Optional[int]=None ):
'''simple docstring'''
assert torch_layer.weight.shape == weight.shape, f"""{torch_layer} layer.weight does not match"""
lowerCAmelCase : str = nn.Parameter(SCREAMING_SNAKE_CASE )
if bias is not None:
assert torch_layer.bias.shape == bias.shape, f"""{torch_layer} layer.bias does not match"""
lowerCAmelCase : List[str] = nn.Parameter(SCREAMING_SNAKE_CASE )
def a__ ( SCREAMING_SNAKE_CASE : Any , SCREAMING_SNAKE_CASE : Optional[int] , SCREAMING_SNAKE_CASE : str ):
'''simple docstring'''
lowerCAmelCase : int = np.asarray(weights[0] )
lowerCAmelCase : Any = np.asarray(weights[1] )
lowerCAmelCase : Dict = np.asarray(weights[2] )
set_param(
torch_layer.self_attention.query_key , torch.tensor(SCREAMING_SNAKE_CASE ).transpose(1 , 2 ).contiguous().view(-1 , SCREAMING_SNAKE_CASE ) , )
set_param(
torch_layer.self_attention.value , torch.tensor(SCREAMING_SNAKE_CASE ).transpose(1 , 2 ).contiguous().view(-1 , SCREAMING_SNAKE_CASE ) , )
set_param(
torch_layer.output.dense , torch.tensor(SCREAMING_SNAKE_CASE ).view(-1 , SCREAMING_SNAKE_CASE ).contiguous().transpose(0 , 1 ) , )
def a__ ( SCREAMING_SNAKE_CASE : Tuple , SCREAMING_SNAKE_CASE : Optional[int] , SCREAMING_SNAKE_CASE : List[str] ):
'''simple docstring'''
lowerCAmelCase : Optional[int] = np.asarray(weights[0] )
lowerCAmelCase : Tuple = np.asarray(weights[1] )
lowerCAmelCase : Union[str, Any] = np.asarray(weights[2] )
lowerCAmelCase : str = np.asarray(weights[3] )
set_param(
torch_layer.self_attention.query , torch.tensor(SCREAMING_SNAKE_CASE ).transpose(1 , 2 ).contiguous().view(-1 , SCREAMING_SNAKE_CASE ) , )
set_param(
torch_layer.self_attention.key , torch.tensor(SCREAMING_SNAKE_CASE ).transpose(1 , 2 ).contiguous().view(-1 , SCREAMING_SNAKE_CASE ) , )
set_param(
torch_layer.self_attention.value , torch.tensor(SCREAMING_SNAKE_CASE ).transpose(1 , 2 ).contiguous().view(-1 , SCREAMING_SNAKE_CASE ) , )
set_param(
torch_layer.output.dense , torch.tensor(SCREAMING_SNAKE_CASE ).view(-1 , SCREAMING_SNAKE_CASE ).contiguous().transpose(0 , 1 ) , )
def a__ ( SCREAMING_SNAKE_CASE : Any , SCREAMING_SNAKE_CASE : Optional[int] , SCREAMING_SNAKE_CASE : List[Any] ):
'''simple docstring'''
lowerCAmelCase : Any = weights[0][0][0]
lowerCAmelCase : List[str] = np.asarray(layer_norm_a[0] )
lowerCAmelCase : Dict = np.asarray(layer_norm_a[1] )
set_param(
torch_block.attention.layer_norm , torch.tensor(SCREAMING_SNAKE_CASE ) , torch.tensor(SCREAMING_SNAKE_CASE ) , )
# lsh weights + output
lowerCAmelCase : Tuple = weights[0][1]
if len(SCREAMING_SNAKE_CASE ) < 4:
set_layer_weights_in_torch_lsh(SCREAMING_SNAKE_CASE , torch_block.attention , SCREAMING_SNAKE_CASE )
else:
set_layer_weights_in_torch_local(SCREAMING_SNAKE_CASE , torch_block.attention , SCREAMING_SNAKE_CASE )
# intermediate weighs
lowerCAmelCase : List[str] = weights[2][0][1][2]
# Chunked Feed Forward
if len(SCREAMING_SNAKE_CASE ) == 4:
lowerCAmelCase : Optional[int] = intermediate_weights[2]
# layernorm 2
lowerCAmelCase : int = np.asarray(intermediate_weights[0][0] )
lowerCAmelCase : List[str] = np.asarray(intermediate_weights[0][1] )
set_param(
torch_block.feed_forward.layer_norm , torch.tensor(SCREAMING_SNAKE_CASE ) , torch.tensor(SCREAMING_SNAKE_CASE ) , )
# intermediate dense
lowerCAmelCase : Dict = np.asarray(intermediate_weights[1][0] )
lowerCAmelCase : List[str] = np.asarray(intermediate_weights[1][1] )
set_param(
torch_block.feed_forward.dense.dense , torch.tensor(SCREAMING_SNAKE_CASE ).transpose(0 , 1 ).contiguous() , torch.tensor(SCREAMING_SNAKE_CASE ) , )
# intermediate out
lowerCAmelCase : int = np.asarray(intermediate_weights[4][0] )
lowerCAmelCase : Any = np.asarray(intermediate_weights[4][1] )
set_param(
torch_block.feed_forward.output.dense , torch.tensor(SCREAMING_SNAKE_CASE ).transpose(0 , 1 ).contiguous() , torch.tensor(SCREAMING_SNAKE_CASE ) , )
def a__ ( SCREAMING_SNAKE_CASE : List[Any] , SCREAMING_SNAKE_CASE : Tuple , SCREAMING_SNAKE_CASE : Optional[Any] ):
'''simple docstring'''
lowerCAmelCase : Optional[Any] = torch_model.reformer
# word embeds
lowerCAmelCase : List[Any] = np.asarray(weights[1] )
set_param(
torch_model_reformer.embeddings.word_embeddings , torch.tensor(SCREAMING_SNAKE_CASE ) , )
if isinstance(weights[3] , SCREAMING_SNAKE_CASE ):
lowerCAmelCase : Optional[Any] = torch_model_reformer.embeddings.position_embeddings
for emb_idx in range(len(position_embeddings.weights ) ):
lowerCAmelCase : List[str] = np.asarray(weights[3][emb_idx][0] )
assert (
position_embeddings.weights[emb_idx].shape == emb_weights.shape
), f"""{position_embeddings[emb_idx]} emb does not match"""
lowerCAmelCase : List[Any] = nn.Parameter(torch.tensor(SCREAMING_SNAKE_CASE ) )
lowerCAmelCase : List[Any] = weights[5]
assert len(torch_model_reformer.encoder.layers ) * 4 == len(
SCREAMING_SNAKE_CASE ), "HF and trax model do not have the same number of layers"
for layer_idx, layer in enumerate(torch_model_reformer.encoder.layers ):
lowerCAmelCase : str = trax_layer_weights[4 * layer_idx : 4 * (layer_idx + 1)]
set_block_weights_in_torch(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
# output layer norm
lowerCAmelCase : int = np.asarray(weights[7][0] )
lowerCAmelCase : Optional[Any] = np.asarray(weights[7][1] )
set_param(
torch_model_reformer.encoder.layer_norm , torch.tensor(SCREAMING_SNAKE_CASE ) , torch.tensor(SCREAMING_SNAKE_CASE ) , )
# output embeddings
lowerCAmelCase : str = np.asarray(weights[9][0] )
lowerCAmelCase : List[Any] = np.asarray(weights[9][1] )
set_param(
torch_model.lm_head.decoder , torch.tensor(SCREAMING_SNAKE_CASE ).transpose(0 , 1 ).contiguous() , torch.tensor(SCREAMING_SNAKE_CASE ) , )
def a__ ( SCREAMING_SNAKE_CASE : Optional[int] , SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : Tuple ):
'''simple docstring'''
lowerCAmelCase : Optional[int] = ReformerConfig.from_json_file(SCREAMING_SNAKE_CASE )
print(f"""Building PyTorch model from configuration: {config}""" )
lowerCAmelCase : Optional[int] = ReformerModelWithLMHead(SCREAMING_SNAKE_CASE )
with open(SCREAMING_SNAKE_CASE , "rb" ) as f:
lowerCAmelCase : int = pickle.load(SCREAMING_SNAKE_CASE )["weights"]
set_model_weights_in_torch(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , config.hidden_size )
# Save pytorch-model
print(f"""Save PyTorch model to {pytorch_dump_path}""" )
torch.save(model.state_dict() , SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
lowerCAmelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--trax_model_pkl_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help=(
'''The config json file corresponding to the pre-trained Reformer model. \n'''
'''This specifies the model architecture.'''
),
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
lowerCAmelCase__ = parser.parse_args()
convert_trax_checkpoint_to_pytorch(args.trax_model_pkl_path, args.config_file, args.pytorch_dump_path)
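# Example invocation (a sketch; the script filename and paths are hypothetical,
# the flags are the ones defined above):
#   python convert_reformer_trax_checkpoint_to_pytorch.py \
#       --trax_model_pkl_path model.pkl --config_file config.json \
#       --pytorch_dump_path ./reformer-pt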
| 681 |
"""simple docstring"""
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized, parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv("TEST_SAGEMAKER" , "False" ) ) is not True , reason="Skipping test because should only be run when releasing minor transformers version" , )
@pytest.mark.usefixtures("sm_env" )
@parameterized_class(
[
{
"framework": "pytorch",
"script": "run_glue_model_parallelism.py",
"model_name_or_path": "roberta-large",
"instance_type": "ml.p3dn.24xlarge",
"results": {"train_runtime": 16_00, "eval_accuracy": 0.3, "eval_loss": 1.2},
},
{
"framework": "pytorch",
"script": "run_glue.py",
"model_name_or_path": "roberta-large",
"instance_type": "ml.p3dn.24xlarge",
"results": {"train_runtime": 16_00, "eval_accuracy": 0.3, "eval_loss": 1.2},
},
] )
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
"""simple docstring"""
def lowercase__ ( self ):
"""simple docstring"""
if self.framework == "pytorch":
subprocess.run(
f"""cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py""".split() , encoding="utf-8" , check=snake_case__ , )
assert hasattr(self , "env" )
def lowercase__ ( self , snake_case__ ):
"""simple docstring"""
lowerCAmelCase : Optional[Any] = {
"enabled": True,
"processes_per_host": 8,
}
lowerCAmelCase : List[Any] = {
"enabled": True,
"parameters": {
"microbatches": 4,
"placement_strategy": "spread",
"pipeline": "interleaved",
"optimize": "speed",
"partitions": 4,
"ddp": True,
},
}
lowerCAmelCase : List[Any] = {"smdistributed": {"modelparallel": smp_options}, "mpi": mpi_options}
lowerCAmelCase : Optional[Any] = "trainer" if self.script == "run_glue.py" else "smtrainer"
# creates estimator
return HuggingFace(
entry_point=self.script , source_dir=self.env.test_path , role=self.env.role , image_uri=self.env.image_uri , base_job_name=f"""{self.env.base_job_name}-{instance_count}-smp-{name_extension}""" , instance_count=snake_case__ , instance_type=self.instance_type , debugger_hook_config=snake_case__ , hyperparameters={
**self.env.hyperparameters,
"model_name_or_path": self.model_name_or_path,
"max_steps": 500,
} , metric_definitions=self.env.metric_definitions , distribution=snake_case__ , py_version="py36" , )
def lowercase__ ( self , snake_case__ ):
"""simple docstring"""
TrainingJobAnalytics(snake_case__ ).export_csv(f"""{self.env.test_path}/{job_name}_metrics.csv""" )
@parameterized.expand([(1,)] )
def lowercase__ ( self , snake_case__ ):
"""simple docstring"""
lowerCAmelCase : Dict = self.create_estimator(snake_case__ )
# run training
estimator.fit()
# result dataframe
lowerCAmelCase : Tuple = TrainingJobAnalytics(estimator.latest_training_job.name ).dataframe()
# extract kpis
lowerCAmelCase : Tuple = list(result_metrics_df[result_metrics_df.metric_name == "eval_accuracy"]["value"] )
lowerCAmelCase : Optional[int] = list(result_metrics_df[result_metrics_df.metric_name == "eval_loss"]["value"] )
# get train time from SageMaker job, this includes starting, preprocessing, stopping
lowerCAmelCase : Dict = (
Session().describe_training_job(estimator.latest_training_job.name ).get("TrainingTimeInSeconds" , 999_999 )
)
# assert kpis
assert train_runtime <= self.results["train_runtime"]
assert all(t >= self.results["eval_accuracy"] for t in eval_accuracy )
assert all(t <= self.results["eval_loss"] for t in eval_loss )
# dump tests result into json file to share in PR
with open(f"""{estimator.latest_training_job.name}.json""" , "w" ) as outfile:
json.dump({"train_time": train_runtime, "eval_accuracy": eval_accuracy, "eval_loss": eval_loss} , snake_case__ )
| 681 | 1 |
"""simple docstring"""
import importlib
import inspect
import os
import re
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
lowerCAmelCase__ = '''src/transformers'''
# This is to make sure the transformers module imported is the one in the repo.
lowerCAmelCase__ = importlib.util.spec_from_file_location(
'''transformers''',
os.path.join(PATH_TO_TRANSFORMERS, '''__init__.py'''),
submodule_search_locations=[PATH_TO_TRANSFORMERS],
)
lowerCAmelCase__ = spec.loader.load_module()
lowerCAmelCase__ = transformers.models.auto.configuration_auto.CONFIG_MAPPING
# Regex pattern used to find the checkpoint mentioned in the docstring of `config_class`.
# For example, `[bert-base-uncased](https://huggingface.co/bert-base-uncased)`
lowerCAmelCase__ = re.compile(r'''\[(.+?)\]\((https://huggingface\.co/.+?)\)''')
lowerCAmelCase__ = {
'''CLIPConfigMixin''',
'''DecisionTransformerConfigMixin''',
'''EncoderDecoderConfigMixin''',
'''RagConfigMixin''',
'''SpeechEncoderDecoderConfigMixin''',
'''VisionEncoderDecoderConfigMixin''',
'''VisionTextDualEncoderConfigMixin''',
}
def a__ ( ):
'''simple docstring'''
lowerCAmelCase : int = []
for config_class in list(CONFIG_MAPPING.values() ):
lowerCAmelCase : Any = False
# source code of `config_class`
lowerCAmelCase : Dict = inspect.getsource(SCREAMING_SNAKE_CASE )
lowerCAmelCase : List[str] = _re_checkpoint.findall(SCREAMING_SNAKE_CASE )
for checkpoint in checkpoints:
# Each `checkpoint` is a tuple of a checkpoint name and a checkpoint link.
# For example, `('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')`
lowerCAmelCase , lowerCAmelCase : int = checkpoint
# verify the checkpoint name corresponds to the checkpoint link
lowerCAmelCase : Optional[int] = f"""https://huggingface.co/{ckpt_name}"""
if ckpt_link == ckpt_link_from_name:
lowerCAmelCase : int = True
break
lowerCAmelCase : Any = config_class.__name__
if not checkpoint_found and name not in CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK:
configs_without_checkpoint.append(SCREAMING_SNAKE_CASE )
if len(SCREAMING_SNAKE_CASE ) > 0:
lowerCAmelCase : Any = "\n".join(sorted(SCREAMING_SNAKE_CASE ) )
raise ValueError(f"""The following configurations don't contain any valid checkpoint:\n{message}""" )
if __name__ == "__main__":
check_config_docstrings_have_checkpoints()
| 681 |
"""simple docstring"""
from math import factorial
def a__ ( SCREAMING_SNAKE_CASE : int = 1_0_0 ):
'''simple docstring'''
    return sum(int(x) for x in str(factorial(SCREAMING_SNAKE_CASE ) ) )
if __name__ == "__main__":
print(solution(int(input('''Enter the Number: ''').strip())))
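# An equivalent digit sum without the string round-trip (a sketch, not part of
# the original solution), reusing the `factorial` import from the top of the file:
def digit_sum(n: int) -> int:
    total = 0
    while n:
        n, digit = divmod(n, 10)
        total += digit
    return total
assert digit_sum(factorial(100)) == 648  # the known Project Euler #20 answer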
| 681 | 1 |
"""simple docstring"""
import math
def a__ ( SCREAMING_SNAKE_CASE : int ):
'''simple docstring'''
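    # Caveat: float sqrt loses precision for very large inputs; math.isqrt (or
    # the binary search below) is exact.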
return math.sqrt(SCREAMING_SNAKE_CASE ) * math.sqrt(SCREAMING_SNAKE_CASE ) == num
def a__ ( SCREAMING_SNAKE_CASE : int ):
'''simple docstring'''
lowerCAmelCase : Dict = 0
lowerCAmelCase : List[str] = n
while left <= right:
lowerCAmelCase : str = (left + right) // 2
if mid**2 == n:
return True
elif mid**2 > n:
lowerCAmelCase : int = mid - 1
else:
lowerCAmelCase : int = mid + 1
return False
if __name__ == "__main__":
import doctest
doctest.testmod()
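# Python 3.8+ exposes an exact integer square root; a one-line equivalent
# sketch that avoids the floating-point precision loss noted above:
def perfect_square_isqrt(num: int) -> bool:
    return num >= 0 and math.isqrt(num) ** 2 == num
assert perfect_square_isqrt(10**30) and not perfect_square_isqrt(10**30 + 1)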
| 681 |
"""simple docstring"""
from typing import Any
class SCREAMING_SNAKE_CASE__ :
"""simple docstring"""
def __init__( self , snake_case__ ):
"""simple docstring"""
lowerCAmelCase : Dict = data
lowerCAmelCase : Any = None
def __repr__( self ):
"""simple docstring"""
return f"""Node({self.data})"""
class SCREAMING_SNAKE_CASE__ :
"""simple docstring"""
def __init__( self ):
"""simple docstring"""
lowerCAmelCase : Tuple = None
def __iter__( self ):
"""simple docstring"""
lowerCAmelCase : Any = self.head
while node:
yield node.data
lowerCAmelCase : Optional[int] = node.next
def __len__( self ):
"""simple docstring"""
return sum(1 for _ in self )
def __repr__( self ):
"""simple docstring"""
return "->".join([str(snake_case__ ) for item in self] )
def __getitem__( self , snake_case__ ):
"""simple docstring"""
if not 0 <= index < len(self ):
raise ValueError("list index out of range." )
for i, node in enumerate(self ):
if i == index:
return node
return None
def __setitem__( self , snake_case__ , snake_case__ ):
"""simple docstring"""
if not 0 <= index < len(self ):
raise ValueError("list index out of range." )
lowerCAmelCase : Union[str, Any] = self.head
for _ in range(snake_case__ ):
lowerCAmelCase : int = current.next
lowerCAmelCase : List[str] = data
def lowercase__ ( self , snake_case__ ):
"""simple docstring"""
self.insert_nth(len(self ) , snake_case__ )
def lowercase__ ( self , snake_case__ ):
"""simple docstring"""
self.insert_nth(0 , snake_case__ )
def lowercase__ ( self , snake_case__ , snake_case__ ):
"""simple docstring"""
if not 0 <= index <= len(self ):
raise IndexError("list index out of range" )
lowerCAmelCase : Optional[int] = Node(snake_case__ )
if self.head is None:
lowerCAmelCase : Any = new_node
elif index == 0:
lowerCAmelCase : Any = self.head # link new_node to head
lowerCAmelCase : Union[str, Any] = new_node
else:
lowerCAmelCase : List[str] = self.head
for _ in range(index - 1 ):
lowerCAmelCase : int = temp.next
lowerCAmelCase : int = temp.next
lowerCAmelCase : Dict = new_node
def lowercase__ ( self ): # print every node data
"""simple docstring"""
print(self )
def lowercase__ ( self ):
"""simple docstring"""
return self.delete_nth(0 )
def lowercase__ ( self ): # delete from tail
"""simple docstring"""
return self.delete_nth(len(self ) - 1 )
def lowercase__ ( self , snake_case__ = 0 ):
"""simple docstring"""
if not 0 <= index <= len(self ) - 1: # test if index is valid
raise IndexError("List index out of range." )
lowerCAmelCase : List[Any] = self.head # default first node
if index == 0:
lowerCAmelCase : Optional[int] = self.head.next
else:
lowerCAmelCase : List[str] = self.head
for _ in range(index - 1 ):
lowerCAmelCase : Union[str, Any] = temp.next
lowerCAmelCase : Optional[Any] = temp.next
lowerCAmelCase : Any = temp.next.next
return delete_node.data
def lowercase__ ( self ):
"""simple docstring"""
return self.head is None
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : str = None
lowerCAmelCase : Optional[int] = self.head
while current:
# Store the current node's next node.
lowerCAmelCase : List[Any] = current.next
# Make the current node's next point backwards
lowerCAmelCase : Dict = prev
# Make the previous node be the current node
lowerCAmelCase : List[str] = current
# Make the current node the next node (to progress iteration)
lowerCAmelCase : int = next_node
# Return prev in order to put the head at the end
lowerCAmelCase : Tuple = prev
def a__ ( ):
'''simple docstring'''
lowerCAmelCase : Tuple = LinkedList()
assert linked_list.is_empty() is True
assert str(SCREAMING_SNAKE_CASE ) == ""
try:
linked_list.delete_head()
raise AssertionError # This should not happen.
except IndexError:
assert True # This should happen.
try:
linked_list.delete_tail()
raise AssertionError # This should not happen.
except IndexError:
assert True # This should happen.
for i in range(1_0 ):
assert len(SCREAMING_SNAKE_CASE ) == i
linked_list.insert_nth(SCREAMING_SNAKE_CASE , i + 1 )
assert str(SCREAMING_SNAKE_CASE ) == "->".join(str(SCREAMING_SNAKE_CASE ) for i in range(1 , 1_1 ) )
linked_list.insert_head(0 )
linked_list.insert_tail(1_1 )
assert str(SCREAMING_SNAKE_CASE ) == "->".join(str(SCREAMING_SNAKE_CASE ) for i in range(0 , 1_2 ) )
assert linked_list.delete_head() == 0
assert linked_list.delete_nth(9 ) == 1_0
assert linked_list.delete_tail() == 1_1
assert len(SCREAMING_SNAKE_CASE ) == 9
assert str(SCREAMING_SNAKE_CASE ) == "->".join(str(SCREAMING_SNAKE_CASE ) for i in range(1 , 1_0 ) )
assert all(linked_list[i] == i + 1 for i in range(0 , 9 ) ) is True
for i in range(0 , 9 ):
lowerCAmelCase : Optional[Any] = -i
assert all(linked_list[i] == -i for i in range(0 , 9 ) ) is True
linked_list.reverse()
assert str(SCREAMING_SNAKE_CASE ) == "->".join(str(SCREAMING_SNAKE_CASE ) for i in range(-8 , 1 ) )
def a__ ( ):
'''simple docstring'''
lowerCAmelCase : List[str] = [
-9,
1_0_0,
Node(7_7_3_4_5_1_1_2 ),
"dlrow olleH",
7,
5_5_5_5,
0,
-192.55_555,
"Hello, world!",
77.9,
Node(1_0 ),
None,
None,
12.20,
]
lowerCAmelCase : List[str] = LinkedList()
for i in test_input:
linked_list.insert_tail(SCREAMING_SNAKE_CASE )
# Check if it's empty or not
assert linked_list.is_empty() is False
assert (
str(SCREAMING_SNAKE_CASE ) == "-9->100->Node(77345112)->dlrow olleH->7->5555->0->"
"-192.55555->Hello, world!->77.9->Node(10)->None->None->12.2"
)
# Delete the head
lowerCAmelCase : str = linked_list.delete_head()
assert result == -9
assert (
str(SCREAMING_SNAKE_CASE ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
"Hello, world!->77.9->Node(10)->None->None->12.2"
)
# Delete the tail
lowerCAmelCase : Union[str, Any] = linked_list.delete_tail()
assert result == 12.2
assert (
str(SCREAMING_SNAKE_CASE ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
"Hello, world!->77.9->Node(10)->None->None"
)
# Delete a node in specific location in linked list
lowerCAmelCase : List[str] = linked_list.delete_nth(1_0 )
assert result is None
assert (
str(SCREAMING_SNAKE_CASE ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
"Hello, world!->77.9->Node(10)->None"
)
# Add a Node instance to its head
linked_list.insert_head(Node("Hello again, world!" ) )
assert (
str(SCREAMING_SNAKE_CASE )
== "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
"7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None"
)
# Add None to its tail
linked_list.insert_tail(SCREAMING_SNAKE_CASE )
assert (
str(SCREAMING_SNAKE_CASE )
== "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
"7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None->None"
)
# Reverse the linked list
linked_list.reverse()
assert (
str(SCREAMING_SNAKE_CASE )
== "None->None->Node(10)->77.9->Hello, world!->-192.55555->0->5555->"
"7->dlrow olleH->Node(77345112)->100->Node(Hello again, world!)"
)
def a__ ( ):
'''simple docstring'''
from doctest import testmod
testmod()
lowerCAmelCase : Optional[Any] = LinkedList()
linked_list.insert_head(input("Inserting 1st at head " ).strip() )
linked_list.insert_head(input("Inserting 2nd at head " ).strip() )
print("\nPrint list:" )
linked_list.print_list()
linked_list.insert_tail(input("\nInserting 1st at tail " ).strip() )
linked_list.insert_tail(input("Inserting 2nd at tail " ).strip() )
print("\nPrint list:" )
linked_list.print_list()
print("\nDelete head" )
linked_list.delete_head()
print("Delete tail" )
linked_list.delete_tail()
print("\nPrint list:" )
linked_list.print_list()
print("\nReverse linked list" )
linked_list.reverse()
print("\nPrint list:" )
linked_list.print_list()
print("\nString representation of linked list:" )
print(SCREAMING_SNAKE_CASE )
print("\nReading/changing Node data using indexing:" )
print(f"""Element at Position 1: {linked_list[1]}""" )
lowerCAmelCase : Any = input("Enter New Value: " ).strip()
print("New list:" )
print(SCREAMING_SNAKE_CASE )
print(f"""length of linked_list is : {len(SCREAMING_SNAKE_CASE )}""" )
if __name__ == "__main__":
main()
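# Editor's note: a standalone sketch of the in-place reversal performed by
# the reverse method above (renamed lowercase__ in this obfuscated copy).
# Names here are illustrative; the class stores prev back into its head.
def _reverse_nodes_sketch(head):
    prev, current = None, head
    while current:
        next_node = current.next  # remember the remainder of the list
        current.next = prev       # point this node backwards
        prev = current            # advance prev to the current node
        current = next_node       # continue with the saved next node
    return prev                   # new head once current is exhausted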
| 681 | 1 |
"""simple docstring"""
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, List, Mapping, Optional
from packaging import version
if TYPE_CHECKING:
from ... import PreTrainedTokenizer, TensorType
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import is_torch_available, logging
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {
'''bigscience/bloom''': '''https://huggingface.co/bigscience/bloom/resolve/main/config.json''',
'''bigscience/bloom-560m''': '''https://huggingface.co/bigscience/bloom-560m/blob/main/config.json''',
'''bigscience/bloom-1b1''': '''https://huggingface.co/bigscience/bloom-1b1/blob/main/config.json''',
'''bigscience/bloom-1b7''': '''https://huggingface.co/bigscience/bloom-1b7/blob/main/config.json''',
'''bigscience/bloom-3b''': '''https://huggingface.co/bigscience/bloom-3b/blob/main/config.json''',
'''bigscience/bloom-7b1''': '''https://huggingface.co/bigscience/bloom-7b1/blob/main/config.json''',
}
class SCREAMING_SNAKE_CASE__ ( lowercase ):
"""simple docstring"""
a : Optional[int] ="bloom"
a : Tuple =["past_key_values"]
a : Optional[Any] ={
"num_hidden_layers": "n_layer",
"num_attention_heads": "n_head",
}
def __init__( self , snake_case__=250_880 , snake_case__=64 , snake_case__=2 , snake_case__=8 , snake_case__=1e-5 , snake_case__=0.02 , snake_case__=True , snake_case__=1 , snake_case__=2 , snake_case__=False , snake_case__=0.0 , snake_case__=0.0 , snake_case__=1 , snake_case__=False , **snake_case__ , ):
"""simple docstring"""
lowerCAmelCase : int = vocab_size
# Backward compatibility with n_embed kwarg
lowerCAmelCase : Any = kwargs.pop("n_embed" , snake_case__ )
lowerCAmelCase : Optional[int] = hidden_size if n_embed is None else n_embed
lowerCAmelCase : Dict = n_layer
lowerCAmelCase : Optional[Any] = n_head
lowerCAmelCase : Any = layer_norm_epsilon
lowerCAmelCase : int = initializer_range
lowerCAmelCase : Optional[int] = use_cache
lowerCAmelCase : Dict = pretraining_tp
lowerCAmelCase : str = apply_residual_connection_post_layernorm
lowerCAmelCase : Optional[int] = hidden_dropout
lowerCAmelCase : Dict = attention_dropout
lowerCAmelCase : Union[str, Any] = bos_token_id
lowerCAmelCase : Optional[int] = eos_token_id
lowerCAmelCase : Optional[int] = slow_but_exact
super().__init__(bos_token_id=snake_case__ , eos_token_id=snake_case__ , **snake_case__ )
class SCREAMING_SNAKE_CASE__ ( lowercase ):
"""simple docstring"""
a : Optional[int] =version.parse("1.12" )
def __init__( self , snake_case__ , snake_case__ = "default" , snake_case__ = None , snake_case__ = False , ):
"""simple docstring"""
super().__init__(snake_case__ , task=snake_case__ , patching_specs=snake_case__ , use_past=snake_case__ )
if not getattr(self._config , "pad_token_id" , snake_case__ ):
# TODO: how to do that better?
lowerCAmelCase : Union[str, Any] = 0
@property
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : Optional[int] = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}} )
if self.use_past:
# BLOOM stores values on dynamic axis 2. For more details see: https://github.com/huggingface/transformers/pull/18344
self.fill_with_past_key_values_(snake_case__ , direction="inputs" , inverted_values_shape=snake_case__ )
lowerCAmelCase : List[Any] = {0: "batch", 1: "past_sequence + sequence"}
else:
lowerCAmelCase : Any = {0: "batch", 1: "sequence"}
return common_inputs
@property
def lowercase__ ( self ):
"""simple docstring"""
return self._config.n_layer
@property
def lowercase__ ( self ):
"""simple docstring"""
return self._config.n_head
@property
def lowercase__ ( self ):
"""simple docstring"""
return 1e-3
def lowercase__ ( self , snake_case__ , snake_case__ = -1 , snake_case__ = -1 , snake_case__ = False , snake_case__ = None , ):
"""simple docstring"""
lowerCAmelCase : int = super(snake_case__ , self ).generate_dummy_inputs(
snake_case__ , batch_size=snake_case__ , seq_length=snake_case__ , is_pair=snake_case__ , framework=snake_case__ )
# We need to order the input in the way they appears in the forward()
lowerCAmelCase : Any = OrderedDict({"input_ids": common_inputs["input_ids"]} )
# Need to add the past_keys
if self.use_past:
if not is_torch_available():
raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed." )
else:
import torch
lowerCAmelCase , lowerCAmelCase : Any = common_inputs["input_ids"].shape
# Not using the same length for past_key_values
lowerCAmelCase : int = seqlen + 2
lowerCAmelCase : Optional[int] = self._config.hidden_size // self.num_attention_heads
lowerCAmelCase : Any = (
batch * self.num_attention_heads,
head_dim,
past_key_values_length,
)
lowerCAmelCase : int = (
batch * self.num_attention_heads,
past_key_values_length,
head_dim,
)
lowerCAmelCase : Optional[int] = [
(torch.zeros(snake_case__ ), torch.zeros(snake_case__ )) for _ in range(self.num_layers )
]
lowerCAmelCase : Union[str, Any] = common_inputs["attention_mask"]
if self.use_past:
lowerCAmelCase : List[Any] = ordered_inputs["attention_mask"].dtype
lowerCAmelCase : Optional[int] = torch.cat(
[ordered_inputs["attention_mask"], torch.ones(snake_case__ , snake_case__ , dtype=snake_case__ )] , dim=1 )
return ordered_inputs
@property
def lowercase__ ( self ):
"""simple docstring"""
return 13
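# Editor's note: hedged sketch of how an ONNX export config like the one
# above is typically consumed. Method names follow the un-obfuscated upstream
# class (OnnxConfigWithPast); in this file they are renamed.
#
#   onnx_config = SCREAMING_SNAKE_CASE__(model.config, task="default", use_past=True)
#   dummy = onnx_config.generate_dummy_inputs(
#       tokenizer, batch_size=2, seq_length=8, framework=TensorType.PYTORCH
#   )
#   # dummy: input_ids, one (key, value) pair per layer, and an attention_mask
#   # extended with ones over the past_key_values length.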
| 681 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
lowerCAmelCase__ = {
'''configuration_efficientformer''': [
'''EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''EfficientFormerConfig''',
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = ['''EfficientFormerImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = [
'''EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''EfficientFormerForImageClassification''',
'''EfficientFormerForImageClassificationWithTeacher''',
'''EfficientFormerModel''',
'''EfficientFormerPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = [
'''TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFEfficientFormerForImageClassification''',
'''TFEfficientFormerForImageClassificationWithTeacher''',
'''TFEfficientFormerModel''',
'''TFEfficientFormerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_efficientformer import EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, EfficientFormerConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_efficientformer import EfficientFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_efficientformer import (
EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
EfficientFormerForImageClassification,
EfficientFormerForImageClassificationWithTeacher,
EfficientFormerModel,
EfficientFormerPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_efficientformer import (
TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerModel,
TFEfficientFormerPreTrainedModel,
)
else:
import sys
lowerCAmelCase__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
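# Editor's note: with the _LazyModule pattern above, importing the package is
# cheap; heavy submodules load only on first attribute access. A hedged usage
# sketch (assumes the standard transformers package layout):
#
#   from transformers import EfficientFormerConfig  # resolved lazily
#   config = EfficientFormerConfig()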
| 681 | 1 |
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import convert_to_rgb, normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
lowerCAmelCase__ = logging.get_logger(__name__)
class SCREAMING_SNAKE_CASE__ ( lowercase ):
"""simple docstring"""
a : Optional[Any] =["pixel_values"]
def __init__( self , snake_case__ = True , snake_case__ = None , snake_case__ = PILImageResampling.BICUBIC , snake_case__ = True , snake_case__ = 1 / 255 , snake_case__ = True , snake_case__ = None , snake_case__ = None , snake_case__ = True , **snake_case__ , ):
"""simple docstring"""
super().__init__(**snake_case__ )
lowerCAmelCase : Tuple = size if size is not None else {"height": 384, "width": 384}
lowerCAmelCase : Dict = get_size_dict(snake_case__ , default_to_square=snake_case__ )
lowerCAmelCase : Optional[int] = do_resize
lowerCAmelCase : Optional[Any] = size
lowerCAmelCase : Union[str, Any] = resample
lowerCAmelCase : List[Any] = do_rescale
lowerCAmelCase : Dict = rescale_factor
lowerCAmelCase : str = do_normalize
lowerCAmelCase : Any = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
lowerCAmelCase : Any = image_std if image_std is not None else OPENAI_CLIP_STD
lowerCAmelCase : Optional[int] = do_convert_rgb
def lowercase__ ( self , snake_case__ , snake_case__ , snake_case__ = PILImageResampling.BICUBIC , snake_case__ = None , **snake_case__ , ):
"""simple docstring"""
lowerCAmelCase : Union[str, Any] = get_size_dict(snake_case__ , default_to_square=snake_case__ )
if "height" not in size or "width" not in size:
raise ValueError(f"""The `size` dictionary must contain the keys `height` and `width`. Got {size.keys()}""" )
lowerCAmelCase : Optional[int] = (size["height"], size["width"])
return resize(snake_case__ , size=snake_case__ , resample=snake_case__ , data_format=snake_case__ , **snake_case__ )
def lowercase__ ( self , snake_case__ , snake_case__ , snake_case__ = None , **snake_case__ , ):
"""simple docstring"""
return rescale(snake_case__ , scale=snake_case__ , data_format=snake_case__ , **snake_case__ )
def lowercase__ ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ = None , **snake_case__ , ):
"""simple docstring"""
return normalize(snake_case__ , mean=snake_case__ , std=snake_case__ , data_format=snake_case__ , **snake_case__ )
def lowercase__ ( self , snake_case__ , snake_case__ = None , snake_case__ = None , snake_case__ = None , snake_case__ = None , snake_case__ = None , snake_case__ = None , snake_case__ = None , snake_case__ = None , snake_case__ = None , snake_case__ = None , snake_case__ = ChannelDimension.FIRST , **snake_case__ , ):
"""simple docstring"""
lowerCAmelCase : Any = do_resize if do_resize is not None else self.do_resize
lowerCAmelCase : List[str] = resample if resample is not None else self.resample
lowerCAmelCase : List[str] = do_rescale if do_rescale is not None else self.do_rescale
lowerCAmelCase : List[str] = rescale_factor if rescale_factor is not None else self.rescale_factor
lowerCAmelCase : Optional[int] = do_normalize if do_normalize is not None else self.do_normalize
lowerCAmelCase : Dict = image_mean if image_mean is not None else self.image_mean
lowerCAmelCase : Optional[int] = image_std if image_std is not None else self.image_std
lowerCAmelCase : Union[str, Any] = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
lowerCAmelCase : str = size if size is not None else self.size
lowerCAmelCase : Optional[int] = get_size_dict(snake_case__ , default_to_square=snake_case__ )
lowerCAmelCase : Dict = make_list_of_images(snake_case__ )
if not valid_images(snake_case__ ):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray." )
        if do_resize and (size is None or resample is None):
raise ValueError("Size and resample must be specified if do_resize is True." )
if do_rescale and rescale_factor is None:
raise ValueError("Rescale factor must be specified if do_rescale is True." )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("Image mean and std must be specified if do_normalize is True." )
# PIL RGBA images are converted to RGB
if do_convert_rgb:
lowerCAmelCase : int = [convert_to_rgb(snake_case__ ) for image in images]
# All transformations expect numpy arrays.
lowerCAmelCase : Tuple = [to_numpy_array(snake_case__ ) for image in images]
if do_resize:
lowerCAmelCase : int = [self.resize(image=snake_case__ , size=snake_case__ , resample=snake_case__ ) for image in images]
if do_rescale:
lowerCAmelCase : str = [self.rescale(image=snake_case__ , scale=snake_case__ ) for image in images]
if do_normalize:
lowerCAmelCase : Tuple = [self.normalize(image=snake_case__ , mean=snake_case__ , std=snake_case__ ) for image in images]
lowerCAmelCase : Any = [to_channel_dimension_format(snake_case__ , snake_case__ ) for image in images]
lowerCAmelCase : Dict = BatchFeature(data={"pixel_values": images} , tensor_type=snake_case__ )
return encoded_outputs
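# Editor's note: hedged usage sketch for the image processor above. In this
# obfuscated copy the preprocess method is renamed, so the sketch follows the
# un-obfuscated upstream API rather than this file verbatim:
#
#   import numpy as np
#   processor = SCREAMING_SNAKE_CASE__(size={"height": 384, "width": 384})
#   image = np.zeros((480, 640, 3), dtype=np.uint8)
#   batch = processor(images=image, return_tensors="np")   # calls preprocess()
#   batch["pixel_values"].shape  # expected: (1, 3, 384, 384)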
| 681 |
"""simple docstring"""
import argparse
import csv
import logging
import os
import random
import numpy as np
import torch
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler, TensorDataset
from tqdm import tqdm, trange
from transformers import (
CONFIG_NAME,
WEIGHTS_NAME,
AdamW,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTTokenizer,
get_linear_schedule_with_warmup,
)
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''', datefmt='''%m/%d/%Y %H:%M:%S''', level=logging.INFO
)
lowerCAmelCase__ = logging.getLogger(__name__)
def a__ ( SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : Optional[int] ):
'''simple docstring'''
lowerCAmelCase : Optional[Any] = np.argmax(SCREAMING_SNAKE_CASE , axis=1 )
return np.sum(outputs == labels )
def a__ ( SCREAMING_SNAKE_CASE : int ):
'''simple docstring'''
with open(SCREAMING_SNAKE_CASE , encoding="utf_8" ) as f:
lowerCAmelCase : Tuple = csv.reader(SCREAMING_SNAKE_CASE )
lowerCAmelCase : Tuple = []
next(SCREAMING_SNAKE_CASE ) # skip the first line
for line in tqdm(SCREAMING_SNAKE_CASE ):
output.append((" ".join(line[1:5] ), line[5], line[6], int(line[-1] ) - 1) )
return output
def a__ ( SCREAMING_SNAKE_CASE : Optional[int] , SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : List[str] , SCREAMING_SNAKE_CASE : str ):
'''simple docstring'''
lowerCAmelCase : List[Any] = []
for dataset in encoded_datasets:
lowerCAmelCase : Dict = len(SCREAMING_SNAKE_CASE )
lowerCAmelCase : str = np.zeros((n_batch, 2, input_len) , dtype=np.intaa )
lowerCAmelCase : Tuple = np.zeros((n_batch, 2) , dtype=np.intaa )
lowerCAmelCase : int = np.full((n_batch, 2, input_len) , fill_value=-1_0_0 , dtype=np.intaa )
lowerCAmelCase : List[Any] = np.zeros((n_batch,) , dtype=np.intaa )
        for i, (story, conta, conta, mc_label) in enumerate(SCREAMING_SNAKE_CASE ):
lowerCAmelCase : int = [start_token] + story[:cap_length] + [delimiter_token] + conta[:cap_length] + [clf_token]
lowerCAmelCase : Union[str, Any] = [start_token] + story[:cap_length] + [delimiter_token] + conta[:cap_length] + [clf_token]
lowerCAmelCase : Tuple = with_conta
lowerCAmelCase : Any = with_conta
lowerCAmelCase : Optional[Any] = len(SCREAMING_SNAKE_CASE ) - 1
lowerCAmelCase : Dict = len(SCREAMING_SNAKE_CASE ) - 1
lowerCAmelCase : Optional[Any] = with_conta
lowerCAmelCase : List[Any] = with_conta
lowerCAmelCase : str = mc_label
lowerCAmelCase : Dict = (input_ids, mc_token_ids, lm_labels, mc_labels)
tensor_datasets.append(tuple(torch.tensor(SCREAMING_SNAKE_CASE ) for t in all_inputs ) )
return tensor_datasets
def a__ ( ):
'''simple docstring'''
lowerCAmelCase : Optional[int] = argparse.ArgumentParser()
parser.add_argument("--model_name" , type=SCREAMING_SNAKE_CASE , default="openai-gpt" , help="pretrained model name" )
parser.add_argument("--do_train" , action="store_true" , help="Whether to run training." )
parser.add_argument("--do_eval" , action="store_true" , help="Whether to run eval on the dev set." )
parser.add_argument(
"--output_dir" , default=SCREAMING_SNAKE_CASE , type=SCREAMING_SNAKE_CASE , required=SCREAMING_SNAKE_CASE , help="The output directory where the model predictions and checkpoints will be written." , )
parser.add_argument("--train_dataset" , type=SCREAMING_SNAKE_CASE , default="" )
parser.add_argument("--eval_dataset" , type=SCREAMING_SNAKE_CASE , default="" )
parser.add_argument("--seed" , type=SCREAMING_SNAKE_CASE , default=4_2 )
parser.add_argument("--num_train_epochs" , type=SCREAMING_SNAKE_CASE , default=3 )
parser.add_argument("--train_batch_size" , type=SCREAMING_SNAKE_CASE , default=8 )
parser.add_argument("--eval_batch_size" , type=SCREAMING_SNAKE_CASE , default=1_6 )
parser.add_argument("--adam_epsilon" , default=1E-8 , type=SCREAMING_SNAKE_CASE , help="Epsilon for Adam optimizer." )
parser.add_argument("--max_grad_norm" , type=SCREAMING_SNAKE_CASE , default=1 )
parser.add_argument(
"--max_steps" , default=-1 , type=SCREAMING_SNAKE_CASE , help=(
"If > 0: set total number of training steps to perform. Override num_train_epochs."
) , )
parser.add_argument(
"--gradient_accumulation_steps" , type=SCREAMING_SNAKE_CASE , default=1 , help="Number of updates steps to accumulate before performing a backward/update pass." , )
parser.add_argument("--learning_rate" , type=SCREAMING_SNAKE_CASE , default=6.2_5E-5 )
parser.add_argument("--warmup_steps" , default=0 , type=SCREAMING_SNAKE_CASE , help="Linear warmup over warmup_steps." )
parser.add_argument("--lr_schedule" , type=SCREAMING_SNAKE_CASE , default="warmup_linear" )
parser.add_argument("--weight_decay" , type=SCREAMING_SNAKE_CASE , default=0.01 )
parser.add_argument("--lm_coef" , type=SCREAMING_SNAKE_CASE , default=0.9 )
parser.add_argument("--n_valid" , type=SCREAMING_SNAKE_CASE , default=3_7_4 )
parser.add_argument("--server_ip" , type=SCREAMING_SNAKE_CASE , default="" , help="Can be used for distant debugging." )
parser.add_argument("--server_port" , type=SCREAMING_SNAKE_CASE , default="" , help="Can be used for distant debugging." )
lowerCAmelCase : Tuple = parser.parse_args()
print(SCREAMING_SNAKE_CASE )
if args.server_ip and args.server_port:
# Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
import ptvsd
print("Waiting for debugger attach" )
ptvsd.enable_attach(address=(args.server_ip, args.server_port) , redirect_output=SCREAMING_SNAKE_CASE )
ptvsd.wait_for_attach()
random.seed(args.seed )
np.random.seed(args.seed )
torch.manual_seed(args.seed )
torch.cuda.manual_seed_all(args.seed )
lowerCAmelCase : Optional[int] = torch.device("cuda" if torch.cuda.is_available() else "cpu" )
lowerCAmelCase : Optional[int] = torch.cuda.device_count()
logger.info("device: {}, n_gpu {}".format(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) )
if not args.do_train and not args.do_eval:
raise ValueError("At least one of `do_train` or `do_eval` must be True." )
if not os.path.exists(args.output_dir ):
os.makedirs(args.output_dir )
# Load tokenizer and model
# This loading functions also add new tokens and embeddings called `special tokens`
# These new embeddings will be fine-tuned on the RocStories dataset
lowerCAmelCase : str = ["_start_", "_delimiter_", "_classify_"]
lowerCAmelCase : Dict = OpenAIGPTTokenizer.from_pretrained(args.model_name )
tokenizer.add_tokens(SCREAMING_SNAKE_CASE )
lowerCAmelCase : Dict = tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE )
lowerCAmelCase : Union[str, Any] = OpenAIGPTDoubleHeadsModel.from_pretrained(args.model_name )
model.resize_token_embeddings(len(SCREAMING_SNAKE_CASE ) )
model.to(SCREAMING_SNAKE_CASE )
# Load and encode the datasets
def tokenize_and_encode(SCREAMING_SNAKE_CASE : Optional[Any] ):
if isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
return tokenizer.convert_tokens_to_ids(tokenizer.tokenize(SCREAMING_SNAKE_CASE ) )
elif isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
return obj
return [tokenize_and_encode(SCREAMING_SNAKE_CASE ) for o in obj]
logger.info("Encoding dataset..." )
lowerCAmelCase : Optional[Any] = load_rocstories_dataset(args.train_dataset )
lowerCAmelCase : int = load_rocstories_dataset(args.eval_dataset )
lowerCAmelCase : Tuple = (train_dataset, eval_dataset)
lowerCAmelCase : Dict = tokenize_and_encode(SCREAMING_SNAKE_CASE )
# Compute the max input length for the Transformer
lowerCAmelCase : Any = model.config.n_positions // 2 - 2
lowerCAmelCase : int = max(
len(story[:max_length] ) + max(len(conta[:max_length] ) , len(conta[:max_length] ) ) + 3
for dataset in encoded_datasets
for story, conta, conta, _ in dataset )
lowerCAmelCase : Union[str, Any] = min(SCREAMING_SNAKE_CASE , model.config.n_positions ) # Max size of input for the pre-trained model
# Prepare inputs tensors and dataloaders
lowerCAmelCase : Any = pre_process_datasets(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , *SCREAMING_SNAKE_CASE )
lowerCAmelCase , lowerCAmelCase : Tuple = tensor_datasets[0], tensor_datasets[1]
lowerCAmelCase : List[str] = TensorDataset(*SCREAMING_SNAKE_CASE )
lowerCAmelCase : Union[str, Any] = RandomSampler(SCREAMING_SNAKE_CASE )
lowerCAmelCase : Tuple = DataLoader(SCREAMING_SNAKE_CASE , sampler=SCREAMING_SNAKE_CASE , batch_size=args.train_batch_size )
lowerCAmelCase : int = TensorDataset(*SCREAMING_SNAKE_CASE )
lowerCAmelCase : Dict = SequentialSampler(SCREAMING_SNAKE_CASE )
lowerCAmelCase : Tuple = DataLoader(SCREAMING_SNAKE_CASE , sampler=SCREAMING_SNAKE_CASE , batch_size=args.eval_batch_size )
# Prepare optimizer
if args.do_train:
if args.max_steps > 0:
lowerCAmelCase : int = args.max_steps
lowerCAmelCase : str = args.max_steps // (len(SCREAMING_SNAKE_CASE ) // args.gradient_accumulation_steps) + 1
else:
lowerCAmelCase : Union[str, Any] = len(SCREAMING_SNAKE_CASE ) // args.gradient_accumulation_steps * args.num_train_epochs
lowerCAmelCase : Dict = list(model.named_parameters() )
lowerCAmelCase : str = ["bias", "LayerNorm.bias", "LayerNorm.weight"]
lowerCAmelCase : Tuple = [
{
"params": [p for n, p in param_optimizer if not any(nd in n for nd in no_decay )],
"weight_decay": args.weight_decay,
},
{"params": [p for n, p in param_optimizer if any(nd in n for nd in no_decay )], "weight_decay": 0.0},
]
lowerCAmelCase : Tuple = AdamW(SCREAMING_SNAKE_CASE , lr=args.learning_rate , eps=args.adam_epsilon )
lowerCAmelCase : str = get_linear_schedule_with_warmup(
SCREAMING_SNAKE_CASE , num_warmup_steps=args.warmup_steps , num_training_steps=SCREAMING_SNAKE_CASE )
if args.do_train:
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase : Dict = 0, 0, None
model.train()
for _ in trange(int(args.num_train_epochs ) , desc="Epoch" ):
lowerCAmelCase : Optional[int] = 0
lowerCAmelCase : Union[str, Any] = 0
lowerCAmelCase : Tuple = tqdm(SCREAMING_SNAKE_CASE , desc="Training" )
for step, batch in enumerate(SCREAMING_SNAKE_CASE ):
lowerCAmelCase : Tuple = tuple(t.to(SCREAMING_SNAKE_CASE ) for t in batch )
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase : int = batch
lowerCAmelCase : Optional[int] = model(SCREAMING_SNAKE_CASE , mc_token_ids=SCREAMING_SNAKE_CASE , lm_labels=SCREAMING_SNAKE_CASE , mc_labels=SCREAMING_SNAKE_CASE )
lowerCAmelCase : int = args.lm_coef * losses[0] + losses[1]
loss.backward()
optimizer.step()
scheduler.step()
optimizer.zero_grad()
tr_loss += loss.item()
lowerCAmelCase : Optional[Any] = (
loss.item() if exp_average_loss is None else 0.7 * exp_average_loss + 0.3 * loss.item()
)
nb_tr_steps += 1
lowerCAmelCase : int = "Training loss: {:.2e} lr: {:.2e}".format(SCREAMING_SNAKE_CASE , scheduler.get_lr()[0] )
# Save a trained model
if args.do_train:
# Save a trained model, configuration and tokenizer
lowerCAmelCase : Optional[int] = model.module if hasattr(SCREAMING_SNAKE_CASE , "module" ) else model # Only save the model itself
# If we save using the predefined names, we can load using `from_pretrained`
lowerCAmelCase : Any = os.path.join(args.output_dir , SCREAMING_SNAKE_CASE )
lowerCAmelCase : Dict = os.path.join(args.output_dir , SCREAMING_SNAKE_CASE )
torch.save(model_to_save.state_dict() , SCREAMING_SNAKE_CASE )
model_to_save.config.to_json_file(SCREAMING_SNAKE_CASE )
tokenizer.save_vocabulary(args.output_dir )
# Load a trained model and vocabulary that you have fine-tuned
lowerCAmelCase : List[str] = OpenAIGPTDoubleHeadsModel.from_pretrained(args.output_dir )
lowerCAmelCase : Dict = OpenAIGPTTokenizer.from_pretrained(args.output_dir )
model.to(SCREAMING_SNAKE_CASE )
if args.do_eval:
model.eval()
lowerCAmelCase , lowerCAmelCase : Optional[int] = 0, 0
lowerCAmelCase , lowerCAmelCase : Any = 0, 0
for batch in tqdm(SCREAMING_SNAKE_CASE , desc="Evaluating" ):
lowerCAmelCase : List[Any] = tuple(t.to(SCREAMING_SNAKE_CASE ) for t in batch )
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase : Tuple = batch
with torch.no_grad():
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase : Optional[Any] = model(
SCREAMING_SNAKE_CASE , mc_token_ids=SCREAMING_SNAKE_CASE , lm_labels=SCREAMING_SNAKE_CASE , mc_labels=SCREAMING_SNAKE_CASE )
lowerCAmelCase : List[str] = mc_logits.detach().cpu().numpy()
lowerCAmelCase : List[str] = mc_labels.to("cpu" ).numpy()
lowerCAmelCase : Any = accuracy(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
eval_loss += mc_loss.mean().item()
eval_accuracy += tmp_eval_accuracy
nb_eval_examples += input_ids.size(0 )
nb_eval_steps += 1
lowerCAmelCase : List[Any] = eval_loss / nb_eval_steps
lowerCAmelCase : List[Any] = eval_accuracy / nb_eval_examples
lowerCAmelCase : Tuple = tr_loss / nb_tr_steps if args.do_train else None
lowerCAmelCase : Any = {"eval_loss": eval_loss, "eval_accuracy": eval_accuracy, "train_loss": train_loss}
lowerCAmelCase : List[str] = os.path.join(args.output_dir , "eval_results.txt" )
with open(SCREAMING_SNAKE_CASE , "w" ) as writer:
logger.info("***** Eval results *****" )
for key in sorted(result.keys() ):
logger.info(" %s = %s" , SCREAMING_SNAKE_CASE , str(result[key] ) )
writer.write("%s = %s\n" % (key, str(result[key] )) )
if __name__ == "__main__":
main()
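# Editor's note: a tiny illustration of the argmax-accuracy helper defined at
# the top of this script (names here are illustrative, not the obfuscated
# ones):
#
#   import numpy as np
#   logits = np.array([[0.1, 0.9], [0.8, 0.2]])
#   labels = np.array([1, 1])
#   np.sum(np.argmax(logits, axis=1) == labels)  # -> 1 correct out of 2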
| 681 | 1 |
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DiffusionPipeline,
EulerDiscreteScheduler,
StableDiffusionXLImgaImgPipeline,
UNetaDConditionModel,
)
from diffusers.utils import floats_tensor, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class SCREAMING_SNAKE_CASE__ ( lowercase , lowercase , unittest.TestCase ):
"""simple docstring"""
a : List[str] =StableDiffusionXLImgaImgPipeline
a : Optional[int] =TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width"}
a : int =PipelineTesterMixin.required_optional_params - {"latents"}
a : Tuple =TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
a : Optional[int] =IMAGE_TO_IMAGE_IMAGE_PARAMS
a : List[Any] =IMAGE_TO_IMAGE_IMAGE_PARAMS
def lowercase__ ( self ):
"""simple docstring"""
torch.manual_seed(0 )
lowerCAmelCase : Union[str, Any] = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , attention_head_dim=(2, 4) , use_linear_projection=snake_case__ , addition_embed_type="text_time" , addition_time_embed_dim=8 , transformer_layers_per_block=(1, 2) , projection_class_embeddings_input_dim=80 , cross_attention_dim=64 , )
lowerCAmelCase : Optional[int] = EulerDiscreteScheduler(
beta_start=0.00085 , beta_end=0.012 , steps_offset=1 , beta_schedule="scaled_linear" , timestep_spacing="leading" , )
torch.manual_seed(0 )
lowerCAmelCase : List[Any] = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , sample_size=128 , )
torch.manual_seed(0 )
lowerCAmelCase : Any = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , hidden_act="gelu" , projection_dim=32 , )
lowerCAmelCase : Optional[Any] = CLIPTextModel(snake_case__ )
lowerCAmelCase : Tuple = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" , local_files_only=snake_case__ )
lowerCAmelCase : Optional[int] = CLIPTextModelWithProjection(snake_case__ )
lowerCAmelCase : Optional[int] = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" , local_files_only=snake_case__ )
lowerCAmelCase : Union[str, Any] = {
"unet": unet,
"scheduler": scheduler,
"vae": vae,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"text_encoder_2": text_encoder_a,
"tokenizer_2": tokenizer_a,
# "safety_checker": None,
# "feature_extractor": None,
}
return components
def lowercase__ ( self , snake_case__ , snake_case__=0 ):
"""simple docstring"""
lowerCAmelCase : Optional[Any] = floats_tensor((1, 3, 32, 32) , rng=random.Random(snake_case__ ) ).to(snake_case__ )
lowerCAmelCase : int = image / 2 + 0.5
if str(snake_case__ ).startswith("mps" ):
lowerCAmelCase : str = torch.manual_seed(snake_case__ )
else:
lowerCAmelCase : int = torch.Generator(device=snake_case__ ).manual_seed(snake_case__ )
lowerCAmelCase : List[str] = {
"prompt": "A painting of a squirrel eating a burger",
"image": image,
"generator": generator,
"num_inference_steps": 2,
"guidance_scale": 5.0,
"output_type": "numpy",
"strength": 0.75,
}
return inputs
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : str = "cpu" # ensure determinism for the device-dependent torch.Generator
lowerCAmelCase : str = self.get_dummy_components()
lowerCAmelCase : List[str] = StableDiffusionXLImgaImgPipeline(**snake_case__ )
lowerCAmelCase : List[str] = sd_pipe.to(snake_case__ )
sd_pipe.set_progress_bar_config(disable=snake_case__ )
lowerCAmelCase : Optional[Any] = self.get_dummy_inputs(snake_case__ )
lowerCAmelCase : Union[str, Any] = sd_pipe(**snake_case__ ).images
lowerCAmelCase : List[str] = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
lowerCAmelCase : str = np.array([0.4656, 0.4840, 0.4439, 0.6698, 0.5574, 0.4524, 0.5799, 0.5943, 0.5165] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def lowercase__ ( self ):
"""simple docstring"""
super().test_attention_slicing_forward_pass(expected_max_diff=3e-3 )
def lowercase__ ( self ):
"""simple docstring"""
super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
def lowercase__ ( self ):
"""simple docstring"""
pass
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : Optional[int] = self.get_dummy_components()
lowerCAmelCase : str = StableDiffusionXLImgaImgPipeline(**snake_case__ )
lowerCAmelCase : str = sd_pipe.to(snake_case__ )
lowerCAmelCase : Dict = sd_pipe.to(snake_case__ )
sd_pipe.set_progress_bar_config(disable=snake_case__ )
# forward without prompt embeds
lowerCAmelCase : str = self.get_dummy_inputs(snake_case__ )
lowerCAmelCase : Tuple = 3 * ["this is a negative prompt"]
lowerCAmelCase : Tuple = negative_prompt
lowerCAmelCase : Optional[Any] = 3 * [inputs["prompt"]]
lowerCAmelCase : Tuple = sd_pipe(**snake_case__ )
lowerCAmelCase : Tuple = output.images[0, -3:, -3:, -1]
# forward with prompt embeds
lowerCAmelCase : Tuple = self.get_dummy_inputs(snake_case__ )
lowerCAmelCase : str = 3 * ["this is a negative prompt"]
lowerCAmelCase : List[Any] = 3 * [inputs.pop("prompt" )]
        lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase : Tuple = sd_pipe.encode_prompt(snake_case__ , negative_prompt=snake_case__ )
lowerCAmelCase : List[Any] = sd_pipe(
**snake_case__ , prompt_embeds=snake_case__ , negative_prompt_embeds=snake_case__ , pooled_prompt_embeds=snake_case__ , negative_pooled_prompt_embeds=snake_case__ , )
lowerCAmelCase : List[Any] = output.images[0, -3:, -3:, -1]
# make sure that it's equal
assert np.abs(image_slice_a.flatten() - image_slice_a.flatten() ).max() < 1e-4
@slow
@require_torch_gpu
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
"""simple docstring"""
def lowercase__ ( self ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowercase__ ( self , snake_case__ , snake_case__="cpu" , snake_case__=torch.floataa , snake_case__=0 ):
"""simple docstring"""
lowerCAmelCase : Union[str, Any] = torch.Generator(device=snake_case__ ).manual_seed(snake_case__ )
lowerCAmelCase : int = np.random.RandomState(snake_case__ ).standard_normal((1, 4, 64, 64) )
lowerCAmelCase : List[Any] = torch.from_numpy(snake_case__ ).to(device=snake_case__ , dtype=snake_case__ )
lowerCAmelCase : Dict = {
"prompt": "a photograph of an astronaut riding a horse",
"latents": latents,
"generator": generator,
"num_inference_steps": 3,
"guidance_scale": 7.5,
"output_type": "numpy",
}
return inputs
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : List[str] = DiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-base" )
pipe.to(snake_case__ )
pipe.set_progress_bar_config(disable=snake_case__ )
lowerCAmelCase : Optional[int] = self.get_inputs(snake_case__ )
lowerCAmelCase : Dict = pipe(**snake_case__ ).images
lowerCAmelCase : Optional[int] = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 512, 3)
lowerCAmelCase : List[Any] = np.array([0.49493, 0.47896, 0.40798, 0.54214, 0.53212, 0.48202, 0.47656, 0.46329, 0.48506] )
assert np.abs(image_slice - expected_slice ).max() < 7e-3
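# Editor's note: the slow test above follows the usual diffusers smoke-test
# recipe: a seeded torch.Generator plus pre-built latents make the pipeline
# deterministic, and only a 3x3 corner slice of the output is compared against
# stored reference values, which is cheaper and more robust across hardware
# than matching the full 512x512 image bit-for-bit.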
| 681 |
"""simple docstring"""
from typing import List, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {
'''huggingface/informer-tourism-monthly''': (
'''https://huggingface.co/huggingface/informer-tourism-monthly/resolve/main/config.json'''
),
# See all Informer models at https://huggingface.co/models?filter=informer
}
class SCREAMING_SNAKE_CASE__ ( lowercase ):
"""simple docstring"""
a : Optional[Any] ="informer"
a : int ={
"hidden_size": "d_model",
"num_attention_heads": "encoder_attention_heads",
"num_hidden_layers": "encoder_layers",
}
def __init__( self , snake_case__ = None , snake_case__ = None , snake_case__ = "student_t" , snake_case__ = "nll" , snake_case__ = 1 , snake_case__ = None , snake_case__ = "mean" , snake_case__ = 0 , snake_case__ = 0 , snake_case__ = 0 , snake_case__ = 0 , snake_case__ = None , snake_case__ = None , snake_case__ = 64 , snake_case__ = 32 , snake_case__ = 32 , snake_case__ = 2 , snake_case__ = 2 , snake_case__ = 2 , snake_case__ = 2 , snake_case__ = True , snake_case__ = "gelu" , snake_case__ = 0.05 , snake_case__ = 0.1 , snake_case__ = 0.1 , snake_case__ = 0.1 , snake_case__ = 0.1 , snake_case__ = 100 , snake_case__ = 0.02 , snake_case__=True , snake_case__ = "prob" , snake_case__ = 5 , snake_case__ = True , **snake_case__ , ):
"""simple docstring"""
lowerCAmelCase : Union[str, Any] = prediction_length
lowerCAmelCase : Union[str, Any] = context_length or prediction_length
lowerCAmelCase : List[Any] = distribution_output
lowerCAmelCase : Optional[int] = loss
lowerCAmelCase : Optional[int] = input_size
lowerCAmelCase : str = num_time_features
lowerCAmelCase : Any = lags_sequence if lags_sequence is not None else [1, 2, 3, 4, 5, 6, 7]
lowerCAmelCase : Dict = scaling
lowerCAmelCase : List[str] = num_dynamic_real_features
lowerCAmelCase : Dict = num_static_real_features
lowerCAmelCase : Dict = num_static_categorical_features
# set cardinality
if cardinality and num_static_categorical_features > 0:
if len(snake_case__ ) != num_static_categorical_features:
raise ValueError(
"The cardinality should be a list of the same length as `num_static_categorical_features`" )
lowerCAmelCase : List[str] = cardinality
else:
lowerCAmelCase : Optional[Any] = [0]
# set embedding_dimension
if embedding_dimension and num_static_categorical_features > 0:
if len(snake_case__ ) != num_static_categorical_features:
raise ValueError(
"The embedding dimension should be a list of the same length as `num_static_categorical_features`" )
lowerCAmelCase : List[Any] = embedding_dimension
else:
lowerCAmelCase : Dict = [min(50 , (cat + 1) // 2 ) for cat in self.cardinality]
lowerCAmelCase : List[Any] = num_parallel_samples
# Transformer architecture configuration
lowerCAmelCase : Any = input_size * len(self.lags_sequence ) + self._number_of_features
lowerCAmelCase : str = d_model
lowerCAmelCase : List[str] = encoder_attention_heads
lowerCAmelCase : int = decoder_attention_heads
lowerCAmelCase : Optional[Any] = encoder_ffn_dim
lowerCAmelCase : Dict = decoder_ffn_dim
lowerCAmelCase : int = encoder_layers
lowerCAmelCase : Union[str, Any] = decoder_layers
lowerCAmelCase : Tuple = dropout
lowerCAmelCase : List[Any] = attention_dropout
lowerCAmelCase : int = activation_dropout
lowerCAmelCase : Union[str, Any] = encoder_layerdrop
lowerCAmelCase : int = decoder_layerdrop
lowerCAmelCase : Optional[int] = activation_function
lowerCAmelCase : int = init_std
lowerCAmelCase : Optional[Any] = use_cache
# Informer
lowerCAmelCase : Dict = attention_type
lowerCAmelCase : Any = sampling_factor
lowerCAmelCase : Optional[int] = distil
super().__init__(is_encoder_decoder=snake_case__ , **snake_case__ )
@property
def lowercase__ ( self ):
"""simple docstring"""
return (
sum(self.embedding_dimension )
+ self.num_dynamic_real_features
+ self.num_time_features
+ self.num_static_real_features
+ self.input_size * 2 # the log1p(abs(loc)) and log(scale) features
)
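# Editor's note: illustrative arithmetic for the feature-count property above,
# under assumed upstream defaults (input_size=1, num_time_features=0, and no
# static/dynamic features, so the embedding dimensions sum to 0):
#   0 (embeddings) + 0 (dynamic real) + 0 (time) + 0 (static real) + 1 * 2 = 2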
| 681 | 1 |
"""simple docstring"""
def a__ ( SCREAMING_SNAKE_CASE : List[str] ):
'''simple docstring'''
lowerCAmelCase : Optional[int] = len(SCREAMING_SNAKE_CASE )
while cur > 1:
# Find the maximum number in arr
lowerCAmelCase : List[str] = arr.index(max(arr[0:cur] ) )
# Reverse from 0 to mi
lowerCAmelCase : str = arr[mi::-1] + arr[mi + 1 : len(SCREAMING_SNAKE_CASE )]
# Reverse whole list
lowerCAmelCase : str = arr[cur - 1 :: -1] + arr[cur : len(SCREAMING_SNAKE_CASE )]
cur -= 1
return arr
if __name__ == "__main__":
lowerCAmelCase__ = input('''Enter numbers separated by a comma:\n''').strip()
lowerCAmelCase__ = [int(item) for item in user_input.split(''',''')]
print(pancake_sort(unsorted))
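# Editor's note: a short trace of pancake_sort above on [3, 2, 4, 1]:
#   cur=4: max at index 2 -> flip prefix -> [4, 2, 3, 1] -> flip first 4 -> [1, 3, 2, 4]
#   cur=3: max at index 1 -> flip prefix -> [3, 1, 2, 4] -> flip first 3 -> [2, 1, 3, 4]
#   cur=2: max at index 0 -> prefix flip is a no-op -> flip first 2 -> [1, 2, 3, 4]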
| 681 |
"""simple docstring"""
def a__ ( SCREAMING_SNAKE_CASE : int ):
'''simple docstring'''
if num < 0:
return False
lowerCAmelCase : int = num
lowerCAmelCase : int = 0
while num > 0:
lowerCAmelCase : Dict = rev_num * 1_0 + (num % 1_0)
num //= 1_0
return num_copy == rev_num
if __name__ == "__main__":
import doctest
doctest.testmod()
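# Editor's note: quick examples for the digit-reversal palindrome check above
# (a__ is the obfuscated function name used in this file):
#   a__(121) -> True     121 reversed is 121
#   a__(-121) -> False   negatives are rejected up front
#   a__(10) -> False     10 reversed is 1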
| 681 | 1 |
"""simple docstring"""
from collections import defaultdict
from pathlib import Path
import pandas as pd
from rouge_cli import calculate_rouge_path
from utils import calculate_rouge
lowerCAmelCase__ = [
'''Prosecutor: "No videos were used in the crash investigation" German papers say they saw a cell phone video of the'''
''' final seconds on board Flight 9525. The Germanwings co-pilot says he had a "previous episode of severe'''
''' depression\" German airline confirms it knew of Andreas Lubitz\'s depression years before he took control.''',
'''The Palestinian Authority officially becomes the 123rd member of the International Criminal Court. The formal'''
''' accession was marked with a ceremony at The Hague, in the Netherlands. The Palestinians signed the ICC\'s'''
''' founding Rome Statute in January. Israel and the United States opposed the Palestinians\' efforts to join the'''
''' body.''',
'''Amnesty International releases its annual report on the death penalty. The report catalogs the use of'''
''' state-sanctioned killing as a punitive measure across the globe. At least 607 people were executed around the'''
''' world in 2014, compared to 778 in 2013. The U.S. remains one of the worst offenders for imposing capital'''
''' punishment.''',
]
lowerCAmelCase__ = [
'''Marseille prosecutor says "so far no videos were used in the crash investigation" despite media reports .'''
''' Journalists at Bild and Paris Match are "very confident" the video clip is real, an editor says . Andreas Lubitz'''
''' had informed his Lufthansa training school of an episode of severe depression, airline says .''',
'''Membership gives the ICC jurisdiction over alleged crimes committed in Palestinian territories since last June .'''
''' Israel and the United States opposed the move, which could open the door to war crimes investigations against'''
''' Israelis .''',
'''Amnesty\'s annual death penalty report catalogs encouraging signs, but setbacks in numbers of those sentenced to'''
''' death . Organization claims that governments around the world are using the threat of terrorism to advance'''
''' executions . The number of executions worldwide has gone down by almost 22% compared with 2013, but death'''
''' sentences up by 28% .''',
]
def a__ ( ):
'''simple docstring'''
lowerCAmelCase : List[Any] = calculate_rouge(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , bootstrap_aggregation=SCREAMING_SNAKE_CASE , rouge_keys=["rouge2", "rougeL"] )
assert isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
lowerCAmelCase : int = calculate_rouge(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , bootstrap_aggregation=SCREAMING_SNAKE_CASE , rouge_keys=["rouge2"] )
assert (
pd.DataFrame(no_aggregation["rouge2"] ).fmeasure.mean()
== pd.DataFrame(no_aggregation_just_ra["rouge2"] ).fmeasure.mean()
)
def a__ ( ):
'''simple docstring'''
lowerCAmelCase : Optional[Any] = "rougeLsum"
lowerCAmelCase : List[str] = calculate_rouge(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , newline_sep=SCREAMING_SNAKE_CASE , rouge_keys=[k] )[k]
lowerCAmelCase : Any = calculate_rouge(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , newline_sep=SCREAMING_SNAKE_CASE , rouge_keys=[k] )[k]
assert score > score_no_sep
def a__ ( ):
'''simple docstring'''
lowerCAmelCase : int = ["rouge1", "rouge2", "rougeL"]
lowerCAmelCase : Tuple = calculate_rouge(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , newline_sep=SCREAMING_SNAKE_CASE , rouge_keys=SCREAMING_SNAKE_CASE )
lowerCAmelCase : Union[str, Any] = calculate_rouge(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , newline_sep=SCREAMING_SNAKE_CASE , rouge_keys=SCREAMING_SNAKE_CASE )
assert score_sep == score_no_sep
def a__ ( ):
'''simple docstring'''
lowerCAmelCase : str = [
"Her older sister, Margot Frank, died in 1945, a month earlier than previously thought.",
"Marseille prosecutor says \"so far no videos were used in the crash investigation\" despite media reports .",
]
lowerCAmelCase : List[Any] = [
"Margot Frank, died in 1945, a month earlier than previously thought.",
"Prosecutor: \"No videos were used in the crash investigation\" German papers say they saw a cell phone video of"
" the final seconds on board Flight 9525.",
]
assert calculate_rouge(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , newline_sep=SCREAMING_SNAKE_CASE ) == calculate_rouge(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , newline_sep=SCREAMING_SNAKE_CASE )
def a__ ( ):
'''simple docstring'''
lowerCAmelCase : List[str] = [
"\" \"a person who has such a video needs to immediately give it to the investigators,\" prosecutor says .<n> \"it is a very disturbing scene,\" editor-in-chief of bild online tells \"erin burnett: outfront\" "
]
lowerCAmelCase : Dict = [
" Marseille prosecutor says \"so far no videos were used in the crash investigation\" despite media reports . Journalists at Bild and Paris Match are \"very confident\" the video clip is real, an editor says . Andreas Lubitz had informed his Lufthansa training school of an episode of severe depression, airline says ."
]
lowerCAmelCase : int = calculate_rouge(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , rouge_keys=["rougeLsum"] , newline_sep=SCREAMING_SNAKE_CASE )["rougeLsum"]
lowerCAmelCase : Tuple = calculate_rouge(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , rouge_keys=["rougeLsum"] )["rougeLsum"]
assert new_score > prev_score
def a__ ( ):
'''simple docstring'''
lowerCAmelCase : str = Path("examples/seq2seq/test_data/wmt_en_ro" )
lowerCAmelCase : Dict = calculate_rouge_path(data_dir.joinpath("test.source" ) , data_dir.joinpath("test.target" ) )
assert isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
lowerCAmelCase : Tuple = calculate_rouge_path(
data_dir.joinpath("test.source" ) , data_dir.joinpath("test.target" ) , bootstrap_aggregation=SCREAMING_SNAKE_CASE )
assert isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
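# Editor's note: with bootstrap_aggregation=True, calculate_rouge returns a
# dict of aggregate scores; with bootstrap_aggregation=False it returns
# per-example score objects, which is why the disaggregated test above wraps
# them in a pandas DataFrame before averaging fmeasure.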
| 681 |
"""simple docstring"""
import os
import re
import shutil
import sys
import tempfile
import unittest
import black
lowerCAmelCase__ = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, '''utils'''))
import check_copies # noqa: E402
# This is the reference code that will be used in the tests.
# If DDPMSchedulerOutput is changed in scheduling_ddpm.py, this code needs to be manually updated.
lowerCAmelCase__ = ''' \"""
Output class for the scheduler\'s step function output.
Args:
prev_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):
Computed sample (x_{t-1}) of previous timestep. `prev_sample` should be used as next model input in the
denoising loop.
pred_original_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):
The predicted denoised sample (x_{0}) based on the model output from the current timestep.
`pred_original_sample` can be used to preview progress or for guidance.
\"""
prev_sample: torch.FloatTensor
pred_original_sample: Optional[torch.FloatTensor] = None
'''
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
"""simple docstring"""
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : int = tempfile.mkdtemp()
os.makedirs(os.path.join(self.diffusers_dir , "schedulers/" ) )
lowerCAmelCase : List[str] = self.diffusers_dir
shutil.copy(
os.path.join(snake_case__ , "src/diffusers/schedulers/scheduling_ddpm.py" ) , os.path.join(self.diffusers_dir , "schedulers/scheduling_ddpm.py" ) , )
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : Dict = "src/diffusers"
shutil.rmtree(self.diffusers_dir )
def lowercase__ ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__=None ):
"""simple docstring"""
lowerCAmelCase : Union[str, Any] = comment + f"""\nclass {class_name}(nn.Module):\n""" + class_code
if overwrite_result is not None:
lowerCAmelCase : str = comment + f"""\nclass {class_name}(nn.Module):\n""" + overwrite_result
lowerCAmelCase : int = black.Mode(target_versions={black.TargetVersion.PYaa} , line_length=119 )
lowerCAmelCase : int = black.format_str(snake_case__ , mode=snake_case__ )
lowerCAmelCase : Dict = os.path.join(self.diffusers_dir , "new_code.py" )
with open(snake_case__ , "w" , newline="\n" ) as f:
f.write(snake_case__ )
if overwrite_result is None:
self.assertTrue(len(check_copies.is_copy_consistent(snake_case__ ) ) == 0 )
else:
check_copies.is_copy_consistent(f.name , overwrite=snake_case__ )
with open(snake_case__ , "r" ) as f:
self.assertTrue(f.read() , snake_case__ )
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : List[str] = check_copies.find_code_in_diffusers("schedulers.scheduling_ddpm.DDPMSchedulerOutput" )
self.assertEqual(snake_case__ , snake_case__ )
def lowercase__ ( self ):
"""simple docstring"""
self.check_copy_consistency(
"# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput" , "DDPMSchedulerOutput" , REFERENCE_CODE + "\n" , )
# With no empty line at the end
self.check_copy_consistency(
"# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput" , "DDPMSchedulerOutput" , snake_case__ , )
# Copy consistency with rename
self.check_copy_consistency(
"# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test" , "TestSchedulerOutput" , re.sub("DDPM" , "Test" , snake_case__ ) , )
# Copy consistency with a really long name
lowerCAmelCase : Union[str, Any] = "TestClassWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason"
self.check_copy_consistency(
f"""# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->{long_class_name}""" , f"""{long_class_name}SchedulerOutput""" , re.sub("Bert" , snake_case__ , snake_case__ ) , )
# Copy consistency with overwrite
self.check_copy_consistency(
"# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test" , "TestSchedulerOutput" , snake_case__ , overwrite_result=re.sub("DDPM" , "Test" , snake_case__ ) , )
| 681 | 1 |
"""simple docstring"""
def a__ ( SCREAMING_SNAKE_CASE : int ):
'''simple docstring'''
if number < 0:
raise ValueError("number must not be negative" )
return number & (number - 1) == 0
if __name__ == "__main__":
import doctest
doctest.testmod()
| 681 |
"""simple docstring"""
import re
from flax.core.frozen_dict import freeze
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.experimental import PartitionSpec as P
# Sentinels
lowerCAmelCase__ = object()
# For specifying empty leaf dict `{}`
lowerCAmelCase__ = object()
def a__ ( SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : Optional[Any] ):
'''simple docstring'''
lowerCAmelCase : Optional[int] = tuple((re.compile(x + "$" ) for x in qs) )
for i in range(len(SCREAMING_SNAKE_CASE ) - len(SCREAMING_SNAKE_CASE ) + 1 ):
lowerCAmelCase : int = [x.match(SCREAMING_SNAKE_CASE ) for x, y in zip(SCREAMING_SNAKE_CASE , ks[i:] )]
if matches and all(SCREAMING_SNAKE_CASE ):
return True
return False
def a__ ( SCREAMING_SNAKE_CASE : int ):
'''simple docstring'''
def replace(SCREAMING_SNAKE_CASE : Tuple , SCREAMING_SNAKE_CASE : Dict ):
for rule, replacement in rules:
if _match(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
return replacement
return val
return replace
def a__ ( ):
'''simple docstring'''
return [
# embeddings
(("transformer", "wpe", "embedding"), P("mp" , SCREAMING_SNAKE_CASE )),
(("transformer", "wte", "embedding"), P("mp" , SCREAMING_SNAKE_CASE )),
        # attention
(("attention", "(q_proj|k_proj|v_proj)", "kernel"), P(SCREAMING_SNAKE_CASE , "mp" )),
(("attention", "out_proj", "kernel"), P("mp" , SCREAMING_SNAKE_CASE )),
(("attention", "out_proj", "bias"), None),
# mlp
(("mlp", "c_fc", "kernel"), P(SCREAMING_SNAKE_CASE , "mp" )),
(("mlp", "c_fc", "bias"), P("mp" )),
(("mlp", "c_proj", "kernel"), P("mp" , SCREAMING_SNAKE_CASE )),
(("mlp", "c_proj", "bias"), None),
# layer norms
((r"ln_\d+", "bias"), None),
((r"\d+", r"ln_\d+", "scale"), None),
(("ln_f", "bias"), None),
(("ln_f", "scale"), None),
]
def a__ ( SCREAMING_SNAKE_CASE : Union[str, Any] ):
'''simple docstring'''
lowerCAmelCase : Any = _get_partition_rules()
lowerCAmelCase : Tuple = _replacement_rules(SCREAMING_SNAKE_CASE )
lowerCAmelCase : List[str] = {k: _unmatched for k in flatten_dict(SCREAMING_SNAKE_CASE )}
lowerCAmelCase : List[Any] = {k: replace(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) for k, v in initd.items()}
assert _unmatched not in result.values(), "Incomplete partition spec."
return freeze(unflatten_dict(SCREAMING_SNAKE_CASE ) )
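# Editor's note: illustrative check of the regex key-matcher above. _match
# slides the rule over the flattened parameter key, so a rule such as
# ("attention", "out_proj", "kernel") matches any key containing that
# suffix sequence:
#   _match(("attention", "out_proj", "kernel"),
#          ("transformer", "h", "0", "attention", "out_proj", "kernel"))  # True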
| 681 | 1 |
"""simple docstring"""
import cmath
import math
def a__ ( SCREAMING_SNAKE_CASE : float , SCREAMING_SNAKE_CASE : float , SCREAMING_SNAKE_CASE : float , SCREAMING_SNAKE_CASE : float ):
'''simple docstring'''
lowerCAmelCase : Optional[int] = math.radians(SCREAMING_SNAKE_CASE )
lowerCAmelCase : Union[str, Any] = math.radians(SCREAMING_SNAKE_CASE )
# Convert voltage and current to rectangular form
lowerCAmelCase : Optional[int] = cmath.rect(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
lowerCAmelCase : Any = cmath.rect(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
# Calculate apparent power
return voltage_rect * current_rect
if __name__ == "__main__":
import doctest
doctest.testmod()
| 681 |
"""simple docstring"""
def a__ ( SCREAMING_SNAKE_CASE : int = 1_0 , SCREAMING_SNAKE_CASE : int = 2_2 ):
'''simple docstring'''
lowerCAmelCase : Dict = range(1 , SCREAMING_SNAKE_CASE )
lowerCAmelCase : List[str] = range(1 , SCREAMING_SNAKE_CASE )
return sum(
1 for power in powers for base in bases if len(str(base**power ) ) == power )
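# This is the Project Euler "powerful digit counts" check: it counts pairs
# (base, power) where base**power has exactly `power` digits, e.g.
# 7**5 == 16807 is a five-digit fifth power, so it contributes 1 to the sum.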
if __name__ == "__main__":
print(F"{solution(10, 22) = }")
| 681 | 1 |
"""simple docstring"""
import re
import string
from collections import Counter
import sacrebleu
import sacremoses
from packaging import version
import datasets
lowerCAmelCase__ = '''
@inproceedings{xu-etal-2016-optimizing,
title = {Optimizing Statistical Machine Translation for Text Simplification},
authors={Xu, Wei and Napoles, Courtney and Pavlick, Ellie and Chen, Quanze and Callison-Burch, Chris},
journal = {Transactions of the Association for Computational Linguistics},
volume = {4},
year={2016},
url = {https://www.aclweb.org/anthology/Q16-1029},
pages = {401--415},
}
@inproceedings{post-2018-call,
title = "A Call for Clarity in Reporting {BLEU} Scores",
author = "Post, Matt",
booktitle = "Proceedings of the Third Conference on Machine Translation: Research Papers",
month = oct,
year = "2018",
address = "Belgium, Brussels",
publisher = "Association for Computational Linguistics",
url = "https://www.aclweb.org/anthology/W18-6319",
pages = "186--191",
}
'''
lowerCAmelCase__ = '''\
WIKI_SPLIT is the combination of three metrics: SARI, EXACT and SACREBLEU.
It can be used to evaluate the quality of machine-generated texts.
'''
lowerCAmelCase__ = '''
Calculates the SARI score (between 0 and 100) given a list of source and predicted
sentences, and a list of lists of reference sentences. It also computes the SacreBLEU score and the exact match score.
Args:
sources: list of source sentences where each sentence should be a string.
predictions: list of predicted sentences where each sentence should be a string.
references: list of lists of reference sentences where each sentence should be a string.
Returns:
sari: sari score
sacrebleu: sacrebleu score
exact: exact score
Examples:
>>> sources=["About 95 species are currently accepted ."]
>>> predictions=["About 95 you now get in ."]
>>> references=[["About 95 species are currently known ."]]
>>> wiki_split = datasets.load_metric("wiki_split")
>>> results = wiki_split.compute(sources=sources, predictions=predictions, references=references)
>>> print(results)
{\'sari\': 21.805555555555557, \'sacrebleu\': 14.535768424205482, \'exact\': 0.0}
'''
def a__ ( SCREAMING_SNAKE_CASE : Optional[int] ):
'''simple docstring'''
def remove_articles(SCREAMING_SNAKE_CASE : Optional[Any] ):
lowerCAmelCase : int = re.compile(r"\b(a|an|the)\b" , re.UNICODE )
return re.sub(SCREAMING_SNAKE_CASE , " " , SCREAMING_SNAKE_CASE )
def white_space_fix(SCREAMING_SNAKE_CASE : List[str] ):
return " ".join(text.split() )
def remove_punc(SCREAMING_SNAKE_CASE : Optional[Any] ):
lowerCAmelCase : Union[str, Any] = set(string.punctuation )
return "".join(ch for ch in text if ch not in exclude )
def lower(SCREAMING_SNAKE_CASE : int ):
return text.lower()
return white_space_fix(remove_articles(remove_punc(lower(SCREAMING_SNAKE_CASE ) ) ) )
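# SQuAD-style answer normalization: lowercase the text, strip punctuation,
# remove the articles "a", "an" and "the", and collapse whitespace, so that
# e.g. "The cat." and "cat" normalize to the same string.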
def a__ ( SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : Dict ):
'''simple docstring'''
return int(normalize_answer(SCREAMING_SNAKE_CASE ) == normalize_answer(SCREAMING_SNAKE_CASE ) )
def a__ ( SCREAMING_SNAKE_CASE : Optional[int] , SCREAMING_SNAKE_CASE : Any ):
'''simple docstring'''
lowerCAmelCase : List[str] = [any(compute_exact(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) for ref in refs ) for pred, refs in zip(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )]
return (sum(SCREAMING_SNAKE_CASE ) / len(SCREAMING_SNAKE_CASE )) * 1_0_0
def a__ ( SCREAMING_SNAKE_CASE : Any , SCREAMING_SNAKE_CASE : List[Any] , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : str ):
'''simple docstring'''
lowerCAmelCase : Optional[int] = [rgram for rgrams in rgramslist for rgram in rgrams]
lowerCAmelCase : List[str] = Counter(SCREAMING_SNAKE_CASE )
lowerCAmelCase : List[Any] = Counter(SCREAMING_SNAKE_CASE )
lowerCAmelCase : Dict = Counter()
for sgram, scount in sgramcounter.items():
lowerCAmelCase : Union[str, Any] = scount * numref
lowerCAmelCase : Dict = Counter(SCREAMING_SNAKE_CASE )
lowerCAmelCase : Optional[int] = Counter()
for cgram, ccount in cgramcounter.items():
lowerCAmelCase : Optional[Any] = ccount * numref
# KEEP
lowerCAmelCase : Optional[Any] = sgramcounter_rep & cgramcounter_rep
lowerCAmelCase : List[str] = keepgramcounter_rep & rgramcounter
lowerCAmelCase : List[Any] = sgramcounter_rep & rgramcounter
lowerCAmelCase : str = 0
lowerCAmelCase : List[str] = 0
for keepgram in keepgramcountergood_rep:
keeptmpscorea += keepgramcountergood_rep[keepgram] / keepgramcounter_rep[keepgram]
# Fix an alleged bug [2] in the keep score computation.
# keeptmpscore2 += keepgramcountergood_rep[keepgram] / keepgramcounterall_rep[keepgram]
keeptmpscorea += keepgramcountergood_rep[keepgram]
# Define 0/0=1 instead of 0 to give higher scores for predictions that match
# a target exactly.
lowerCAmelCase : Optional[Any] = 1
lowerCAmelCase : int = 1
if len(SCREAMING_SNAKE_CASE ) > 0:
lowerCAmelCase : Union[str, Any] = keeptmpscorea / len(SCREAMING_SNAKE_CASE )
if len(SCREAMING_SNAKE_CASE ) > 0:
# Fix an alleged bug [2] in the keep score computation.
# keepscore_recall = keeptmpscore2 / len(keepgramcounterall_rep)
lowerCAmelCase : Optional[Any] = keeptmpscorea / sum(keepgramcounterall_rep.values() )
lowerCAmelCase : List[str] = 0
if keepscore_precision > 0 or keepscore_recall > 0:
lowerCAmelCase : Any = 2 * keepscore_precision * keepscore_recall / (keepscore_precision + keepscore_recall)
# DELETION
lowerCAmelCase : Any = sgramcounter_rep - cgramcounter_rep
lowerCAmelCase : Optional[Any] = delgramcounter_rep - rgramcounter
lowerCAmelCase : Union[str, Any] = sgramcounter_rep - rgramcounter
lowerCAmelCase : str = 0
lowerCAmelCase : Union[str, Any] = 0
for delgram in delgramcountergood_rep:
deltmpscorea += delgramcountergood_rep[delgram] / delgramcounter_rep[delgram]
deltmpscorea += delgramcountergood_rep[delgram] / delgramcounterall_rep[delgram]
# Define 0/0=1 instead of 0 to give higher scores for predictions that match
# a target exactly.
lowerCAmelCase : Any = 1
if len(SCREAMING_SNAKE_CASE ) > 0:
lowerCAmelCase : List[str] = deltmpscorea / len(SCREAMING_SNAKE_CASE )
# ADDITION
lowerCAmelCase : Union[str, Any] = set(SCREAMING_SNAKE_CASE ) - set(SCREAMING_SNAKE_CASE )
lowerCAmelCase : str = set(SCREAMING_SNAKE_CASE ) & set(SCREAMING_SNAKE_CASE )
lowerCAmelCase : str = set(SCREAMING_SNAKE_CASE ) - set(SCREAMING_SNAKE_CASE )
lowerCAmelCase : str = 0
for addgram in addgramcountergood:
addtmpscore += 1
# Define 0/0=1 instead of 0 to give higher scores for predictions that match
# a target exactly.
lowerCAmelCase : List[str] = 1
lowerCAmelCase : str = 1
if len(SCREAMING_SNAKE_CASE ) > 0:
lowerCAmelCase : str = addtmpscore / len(SCREAMING_SNAKE_CASE )
if len(SCREAMING_SNAKE_CASE ) > 0:
lowerCAmelCase : str = addtmpscore / len(SCREAMING_SNAKE_CASE )
lowerCAmelCase : Any = 0
if addscore_precision > 0 or addscore_recall > 0:
lowerCAmelCase : List[Any] = 2 * addscore_precision * addscore_recall / (addscore_precision + addscore_recall)
return (keepscore, delscore_precision, addscore)
def a__ ( SCREAMING_SNAKE_CASE : Optional[int] , SCREAMING_SNAKE_CASE : Optional[int] , SCREAMING_SNAKE_CASE : Any ):
'''simple docstring'''
lowerCAmelCase : List[str] = len(SCREAMING_SNAKE_CASE )
lowerCAmelCase : Optional[int] = ssent.split(" " )
lowerCAmelCase : Dict = csent.split(" " )
lowerCAmelCase : List[str] = []
lowerCAmelCase : List[str] = []
lowerCAmelCase : Tuple = []
lowerCAmelCase : Dict = []
lowerCAmelCase : Any = []
lowerCAmelCase : int = []
lowerCAmelCase : List[Any] = []
lowerCAmelCase : Optional[int] = []
lowerCAmelCase : Optional[Any] = []
lowerCAmelCase : List[str] = []
for rsent in rsents:
lowerCAmelCase : int = rsent.split(" " )
lowerCAmelCase : Union[str, Any] = []
lowerCAmelCase : Optional[int] = []
lowerCAmelCase : Optional[Any] = []
ragramslist.append(SCREAMING_SNAKE_CASE )
for i in range(0 , len(SCREAMING_SNAKE_CASE ) - 1 ):
if i < len(SCREAMING_SNAKE_CASE ) - 1:
lowerCAmelCase : int = ragrams[i] + " " + ragrams[i + 1]
ragrams.append(SCREAMING_SNAKE_CASE )
if i < len(SCREAMING_SNAKE_CASE ) - 2:
lowerCAmelCase : Union[str, Any] = ragrams[i] + " " + ragrams[i + 1] + " " + ragrams[i + 2]
ragrams.append(SCREAMING_SNAKE_CASE )
if i < len(SCREAMING_SNAKE_CASE ) - 3:
lowerCAmelCase : Optional[int] = ragrams[i] + " " + ragrams[i + 1] + " " + ragrams[i + 2] + " " + ragrams[i + 3]
ragrams.append(SCREAMING_SNAKE_CASE )
ragramslist.append(SCREAMING_SNAKE_CASE )
ragramslist.append(SCREAMING_SNAKE_CASE )
ragramslist.append(SCREAMING_SNAKE_CASE )
for i in range(0 , len(SCREAMING_SNAKE_CASE ) - 1 ):
if i < len(SCREAMING_SNAKE_CASE ) - 1:
lowerCAmelCase : List[Any] = sagrams[i] + " " + sagrams[i + 1]
sagrams.append(SCREAMING_SNAKE_CASE )
if i < len(SCREAMING_SNAKE_CASE ) - 2:
lowerCAmelCase : Tuple = sagrams[i] + " " + sagrams[i + 1] + " " + sagrams[i + 2]
sagrams.append(SCREAMING_SNAKE_CASE )
if i < len(SCREAMING_SNAKE_CASE ) - 3:
lowerCAmelCase : int = sagrams[i] + " " + sagrams[i + 1] + " " + sagrams[i + 2] + " " + sagrams[i + 3]
sagrams.append(SCREAMING_SNAKE_CASE )
for i in range(0 , len(SCREAMING_SNAKE_CASE ) - 1 ):
if i < len(SCREAMING_SNAKE_CASE ) - 1:
lowerCAmelCase : str = cagrams[i] + " " + cagrams[i + 1]
cagrams.append(SCREAMING_SNAKE_CASE )
if i < len(SCREAMING_SNAKE_CASE ) - 2:
lowerCAmelCase : List[str] = cagrams[i] + " " + cagrams[i + 1] + " " + cagrams[i + 2]
cagrams.append(SCREAMING_SNAKE_CASE )
if i < len(SCREAMING_SNAKE_CASE ) - 3:
lowerCAmelCase : List[Any] = cagrams[i] + " " + cagrams[i + 1] + " " + cagrams[i + 2] + " " + cagrams[i + 3]
cagrams.append(SCREAMING_SNAKE_CASE )
((lowerCAmelCase) , (lowerCAmelCase) , (lowerCAmelCase)) : List[str] = SARIngram(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
((lowerCAmelCase) , (lowerCAmelCase) , (lowerCAmelCase)) : Any = SARIngram(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
((lowerCAmelCase) , (lowerCAmelCase) , (lowerCAmelCase)) : Optional[Any] = SARIngram(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
((lowerCAmelCase) , (lowerCAmelCase) , (lowerCAmelCase)) : Union[str, Any] = SARIngram(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
lowerCAmelCase : str = sum([keepascore, keepascore, keepascore, keepascore] ) / 4
lowerCAmelCase : int = sum([delascore, delascore, delascore, delascore] ) / 4
lowerCAmelCase : Optional[int] = sum([addascore, addascore, addascore, addascore] ) / 4
lowerCAmelCase : List[Any] = (avgkeepscore + avgdelscore + avgaddscore) / 3
return finalscore
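# SARI as computed above is the mean of three operation scores, each averaged
# over 1- to 4-grams: the F1 of n-grams kept, the precision of n-grams
# deleted, and the F1 of n-grams added, i.e. SARI = (F_keep + P_del + F_add) / 3,
# scaled to [0, 1] here and to [0, 100] by the caller.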
def a__ ( SCREAMING_SNAKE_CASE : Union[str, Any] , SCREAMING_SNAKE_CASE : bool = True , SCREAMING_SNAKE_CASE : str = "13a" , SCREAMING_SNAKE_CASE : bool = True ):
'''simple docstring'''
if lowercase:
lowerCAmelCase : Any = sentence.lower()
if tokenizer in ["13a", "intl"]:
if version.parse(sacrebleu.__version__ ).major >= 2:
lowerCAmelCase : str = sacrebleu.metrics.bleu._get_tokenizer(SCREAMING_SNAKE_CASE )()(SCREAMING_SNAKE_CASE )
else:
lowerCAmelCase : Dict = sacrebleu.TOKENIZERS[tokenizer]()(SCREAMING_SNAKE_CASE )
elif tokenizer == "moses":
lowerCAmelCase : Optional[Any] = sacremoses.MosesTokenizer().tokenize(SCREAMING_SNAKE_CASE , return_str=SCREAMING_SNAKE_CASE , escape=SCREAMING_SNAKE_CASE )
elif tokenizer == "penn":
lowerCAmelCase : List[str] = sacremoses.MosesTokenizer().penn_tokenize(SCREAMING_SNAKE_CASE , return_str=SCREAMING_SNAKE_CASE )
else:
lowerCAmelCase : Optional[int] = sentence
if not return_str:
lowerCAmelCase : str = normalized_sent.split()
return normalized_sent
def a__ ( SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : Optional[Any] , SCREAMING_SNAKE_CASE : str ):
'''simple docstring'''
if not (len(SCREAMING_SNAKE_CASE ) == len(SCREAMING_SNAKE_CASE ) == len(SCREAMING_SNAKE_CASE )):
raise ValueError("Sources length must match predictions and references lengths." )
lowerCAmelCase : Union[str, Any] = 0
for src, pred, refs in zip(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
sari_score += SARIsent(normalize(SCREAMING_SNAKE_CASE ) , normalize(SCREAMING_SNAKE_CASE ) , [normalize(SCREAMING_SNAKE_CASE ) for sent in refs] )
lowerCAmelCase : List[Any] = sari_score / len(SCREAMING_SNAKE_CASE )
return 1_0_0 * sari_score
def a__ ( SCREAMING_SNAKE_CASE : Any , SCREAMING_SNAKE_CASE : Tuple , SCREAMING_SNAKE_CASE : int="exp" , SCREAMING_SNAKE_CASE : List[Any]=None , SCREAMING_SNAKE_CASE : List[str]=False , SCREAMING_SNAKE_CASE : int=False , SCREAMING_SNAKE_CASE : Optional[Any]=False , ):
'''simple docstring'''
lowerCAmelCase : List[str] = len(references[0] )
if any(len(SCREAMING_SNAKE_CASE ) != references_per_prediction for refs in references ):
raise ValueError("Sacrebleu requires the same number of references for each prediction" )
lowerCAmelCase : List[str] = [[refs[i] for refs in references] for i in range(SCREAMING_SNAKE_CASE )]
lowerCAmelCase : Tuple = sacrebleu.corpus_bleu(
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , smooth_method=SCREAMING_SNAKE_CASE , smooth_value=SCREAMING_SNAKE_CASE , force=SCREAMING_SNAKE_CASE , lowercase=SCREAMING_SNAKE_CASE , use_effective_order=SCREAMING_SNAKE_CASE , )
return output.score
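# sacrebleu's corpus_bleu expects references grouped per reference position
# rather than per prediction, so the comprehension above transposes the
# input: transformed list i holds the i-th reference of every prediction.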
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class SCREAMING_SNAKE_CASE__ ( datasets.Metric ):
"""simple docstring"""
def lowercase__ ( self ):
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Value("string" , id="sequence" ),
"references": datasets.Sequence(datasets.Value("string" , id="sequence" ) , id="references" ),
} ) , codebase_urls=[
"https://github.com/huggingface/transformers/blob/master/src/transformers/data/metrics/squad_metrics.py",
"https://github.com/cocoxu/simplification/blob/master/SARI.py",
"https://github.com/tensorflow/tensor2tensor/blob/master/tensor2tensor/utils/sari_hook.py",
"https://github.com/mjpost/sacreBLEU",
] , reference_urls=[
"https://www.aclweb.org/anthology/Q16-1029.pdf",
"https://github.com/mjpost/sacreBLEU",
"https://en.wikipedia.org/wiki/BLEU",
"https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213",
] , )
def lowercase__ ( self , snake_case__ , snake_case__ , snake_case__ ):
"""simple docstring"""
lowerCAmelCase : Any = {}
result.update({"sari": compute_sari(sources=snake_case__ , predictions=snake_case__ , references=snake_case__ )} )
result.update({"sacrebleu": compute_sacrebleu(predictions=snake_case__ , references=snake_case__ )} )
result.update({"exact": compute_em(predictions=snake_case__ , references=snake_case__ )} )
return result
| 681 |
"""simple docstring"""
def a__ ( SCREAMING_SNAKE_CASE : List[str] ):
'''simple docstring'''
lowerCAmelCase : Optional[int] = len(SCREAMING_SNAKE_CASE )
while cur > 1:
# Find the maximum number in arr
lowerCAmelCase : List[str] = arr.index(max(arr[0:cur] ) )
# Reverse from 0 to mi
lowerCAmelCase : str = arr[mi::-1] + arr[mi + 1 : len(SCREAMING_SNAKE_CASE )]
# Reverse whole list
lowerCAmelCase : str = arr[cur - 1 :: -1] + arr[cur : len(SCREAMING_SNAKE_CASE )]
cur -= 1
return arr
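# Pancake sort trace for [3, 2, 4, 1]: flip the prefix ending at the maximum
# (index 2) to get [4, 2, 3, 1], flip the whole unsorted prefix to get
# [1, 3, 2, 4], then repeat on the shrinking prefix -- O(n^2) comparisons and
# at most 2n - 3 flips overall.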
if __name__ == "__main__":
lowerCAmelCase__ = input('''Enter numbers separated by a comma:\n''').strip()
lowerCAmelCase__ = [int(item) for item in user_input.split(''',''')]
print(pancake_sort(unsorted))
| 681 | 1 |
"""simple docstring"""
import os
import tempfile
import unittest
from pathlib import Path
from transformers import AutoConfig, is_tf_available
from transformers.testing_utils import require_tf
if is_tf_available():
import tensorflow as tf
from transformers import TensorFlowBenchmark, TensorFlowBenchmarkArguments
@require_tf
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
"""simple docstring"""
def lowercase__ ( self , snake_case__ ):
"""simple docstring"""
for model_result in results.values():
for batch_size, sequence_length in zip(model_result["bs"] , model_result["ss"] ):
lowerCAmelCase : List[Any] = model_result["result"][batch_size][sequence_length]
self.assertIsNotNone(snake_case__ )
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : List[Any] = "sshleifer/tiny-gpt2"
lowerCAmelCase : int = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=snake_case__ , inference=snake_case__ , sequence_lengths=[8] , batch_sizes=[1] , eager_mode=snake_case__ , multi_process=snake_case__ , )
lowerCAmelCase : List[Any] = TensorFlowBenchmark(snake_case__ )
lowerCAmelCase : Optional[Any] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : List[Any] = "sgugger/tiny-distilbert-classification"
lowerCAmelCase : Optional[int] = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=snake_case__ , inference=snake_case__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=snake_case__ , only_pretrain_model=snake_case__ , )
lowerCAmelCase : Any = TensorFlowBenchmark(snake_case__ )
lowerCAmelCase : List[str] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : List[Any] = "sshleifer/tiny-gpt2"
lowerCAmelCase : Optional[int] = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=snake_case__ , inference=snake_case__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=snake_case__ , )
lowerCAmelCase : Dict = TensorFlowBenchmark(snake_case__ )
lowerCAmelCase : int = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : Optional[Any] = "sshleifer/tiny-gpt2"
lowerCAmelCase : Union[str, Any] = AutoConfig.from_pretrained(snake_case__ )
lowerCAmelCase : Optional[int] = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=snake_case__ , inference=snake_case__ , sequence_lengths=[8] , batch_sizes=[1] , eager_mode=snake_case__ , multi_process=snake_case__ , )
lowerCAmelCase : List[Any] = TensorFlowBenchmark(snake_case__ , [config] )
lowerCAmelCase : int = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : int = "sshleifer/tiny-gpt2"
lowerCAmelCase : int = AutoConfig.from_pretrained(snake_case__ )
lowerCAmelCase : List[str] = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=snake_case__ , inference=snake_case__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=snake_case__ , )
lowerCAmelCase : str = TensorFlowBenchmark(snake_case__ , [config] )
lowerCAmelCase : List[Any] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : Optional[Any] = "sshleifer/tiny-gpt2"
lowerCAmelCase : Union[str, Any] = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=snake_case__ , inference=snake_case__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=snake_case__ , )
lowerCAmelCase : str = TensorFlowBenchmark(snake_case__ )
lowerCAmelCase : Optional[int] = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : Any = "sshleifer/tiny-gpt2"
lowerCAmelCase : Union[str, Any] = AutoConfig.from_pretrained(snake_case__ )
lowerCAmelCase : str = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=snake_case__ , inference=snake_case__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=snake_case__ , )
lowerCAmelCase : Tuple = TensorFlowBenchmark(snake_case__ , [config] )
lowerCAmelCase : Optional[int] = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : Union[str, Any] = "patrickvonplaten/t5-tiny-random"
lowerCAmelCase : int = AutoConfig.from_pretrained(snake_case__ )
lowerCAmelCase : Dict = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=snake_case__ , inference=snake_case__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=snake_case__ , )
lowerCAmelCase : Any = TensorFlowBenchmark(snake_case__ , configs=[config] )
lowerCAmelCase : str = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
@unittest.skipIf(is_tf_available() and len(tf.config.list_physical_devices("GPU" ) ) == 0 , "Cannot do xla on CPU." )
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : Union[str, Any] = "sshleifer/tiny-gpt2"
lowerCAmelCase : Union[str, Any] = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=snake_case__ , inference=snake_case__ , sequence_lengths=[8] , batch_sizes=[1] , use_xla=snake_case__ , multi_process=snake_case__ , )
lowerCAmelCase : Optional[Any] = TensorFlowBenchmark(snake_case__ )
lowerCAmelCase : Any = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : int = "sshleifer/tiny-gpt2"
with tempfile.TemporaryDirectory() as tmp_dir:
lowerCAmelCase : List[str] = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , inference=snake_case__ , save_to_csv=snake_case__ , sequence_lengths=[8] , batch_sizes=[1] , inference_time_csv_file=os.path.join(snake_case__ , "inf_time.csv" ) , inference_memory_csv_file=os.path.join(snake_case__ , "inf_mem.csv" ) , env_info_csv_file=os.path.join(snake_case__ , "env.csv" ) , multi_process=snake_case__ , )
lowerCAmelCase : Optional[Any] = TensorFlowBenchmark(snake_case__ )
benchmark.run()
self.assertTrue(Path(os.path.join(snake_case__ , "inf_time.csv" ) ).exists() )
self.assertTrue(Path(os.path.join(snake_case__ , "inf_mem.csv" ) ).exists() )
self.assertTrue(Path(os.path.join(snake_case__ , "env.csv" ) ).exists() )
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : Union[str, Any] = "sshleifer/tiny-gpt2"
def _check_summary_is_not_empty(snake_case__ ):
self.assertTrue(hasattr(snake_case__ , "sequential" ) )
self.assertTrue(hasattr(snake_case__ , "cumulative" ) )
self.assertTrue(hasattr(snake_case__ , "current" ) )
self.assertTrue(hasattr(snake_case__ , "total" ) )
with tempfile.TemporaryDirectory() as tmp_dir:
lowerCAmelCase : str = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , inference=snake_case__ , sequence_lengths=[8] , batch_sizes=[1] , log_filename=os.path.join(snake_case__ , "log.txt" ) , log_print=snake_case__ , trace_memory_line_by_line=snake_case__ , eager_mode=snake_case__ , multi_process=snake_case__ , )
lowerCAmelCase : Optional[int] = TensorFlowBenchmark(snake_case__ )
lowerCAmelCase : str = benchmark.run()
_check_summary_is_not_empty(result.inference_summary )
self.assertTrue(Path(os.path.join(snake_case__ , "log.txt" ) ).exists() )
| 681 |
"""simple docstring"""
from ..utils import is_flax_available, is_torch_available
if is_torch_available():
from .autoencoder_kl import AutoencoderKL
from .controlnet import ControlNetModel
from .dual_transformer_ad import DualTransformeraDModel
from .modeling_utils import ModelMixin
from .prior_transformer import PriorTransformer
from .ta_film_transformer import TaFilmDecoder
from .transformer_ad import TransformeraDModel
from .unet_ad import UNetaDModel
from .unet_ad_condition import UNetaDConditionModel
from .vq_model import VQModel
if is_flax_available():
from .controlnet_flax import FlaxControlNetModel
from .unet_ad_condition_flax import FlaxUNetaDConditionModel
from .vae_flax import FlaxAutoencoderKL
| 681 | 1 |
"""simple docstring"""
def a__ ( SCREAMING_SNAKE_CASE : List[str] ):
'''simple docstring'''
lowerCAmelCase : Union[str, Any] = [0] * len(SCREAMING_SNAKE_CASE )
lowerCAmelCase : Any = []
lowerCAmelCase : Tuple = [1] * len(SCREAMING_SNAKE_CASE )
for values in graph.values():
for i in values:
indegree[i] += 1
for i in range(len(SCREAMING_SNAKE_CASE ) ):
if indegree[i] == 0:
queue.append(SCREAMING_SNAKE_CASE )
while queue:
lowerCAmelCase : List[Any] = queue.pop(0 )
for x in graph[vertex]:
indegree[x] -= 1
if long_dist[vertex] + 1 > long_dist[x]:
lowerCAmelCase : Any = long_dist[vertex] + 1
if indegree[x] == 0:
queue.append(SCREAMING_SNAKE_CASE )
print(max(SCREAMING_SNAKE_CASE ) )
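# This is the longest-path-in-a-DAG pattern built on Kahn's topological sort:
# a vertex is dequeued only after all of its predecessors are settled, so its
# distance is final when processed. For the sample graph below the printed
# value is 5, the vertex count of the longest path 0 -> 2 -> 5 -> 6 -> 7.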
# Adjacency list of Graph
lowerCAmelCase__ = {0: [2, 3, 4], 1: [2, 7], 2: [5], 3: [5, 7], 4: [7], 5: [6], 6: [7], 7: []}
longest_distance(graph)
| 681 |
"""simple docstring"""
import math
def a__ ( SCREAMING_SNAKE_CASE : int ):
'''simple docstring'''
return math.sqrt(SCREAMING_SNAKE_CASE ) * math.sqrt(SCREAMING_SNAKE_CASE ) == num
def a__ ( SCREAMING_SNAKE_CASE : int ):
'''simple docstring'''
lowerCAmelCase : Dict = 0
lowerCAmelCase : List[str] = n
while left <= right:
lowerCAmelCase : str = (left + right) // 2
if mid**2 == n:
return True
elif mid**2 > n:
lowerCAmelCase : int = mid - 1
else:
lowerCAmelCase : int = mid + 1
return False
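# Unlike the float-based check above, this integer binary search cannot be
# fooled by math.sqrt rounding on very large inputs; for n = 49 it probes
# mid = 24, 11, 5, 8, 6, 7 and returns True once 7**2 == 49.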
if __name__ == "__main__":
import doctest
doctest.testmod()
| 681 | 1 |
"""simple docstring"""
from .configuration_bert_masked import MaskedBertConfig
from .modeling_bert_masked import (
MaskedBertForMultipleChoice,
MaskedBertForQuestionAnswering,
MaskedBertForSequenceClassification,
MaskedBertForTokenClassification,
MaskedBertModel,
)
from .modules import *
| 681 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {
'''google/vit-base-patch16-224''': '''https://huggingface.co/vit-base-patch16-224/resolve/main/config.json''',
# See all ViT models at https://huggingface.co/models?filter=vit
}
class SCREAMING_SNAKE_CASE__ ( lowercase ):
"""simple docstring"""
a : Union[str, Any] ="vit"
def __init__( self , snake_case__=768 , snake_case__=12 , snake_case__=12 , snake_case__=3_072 , snake_case__="gelu" , snake_case__=0.0 , snake_case__=0.0 , snake_case__=0.02 , snake_case__=1e-12 , snake_case__=224 , snake_case__=16 , snake_case__=3 , snake_case__=True , snake_case__=16 , **snake_case__ , ):
"""simple docstring"""
super().__init__(**snake_case__ )
lowerCAmelCase : Optional[Any] = hidden_size
lowerCAmelCase : List[Any] = num_hidden_layers
lowerCAmelCase : List[Any] = num_attention_heads
lowerCAmelCase : Union[str, Any] = intermediate_size
lowerCAmelCase : Any = hidden_act
lowerCAmelCase : Optional[Any] = hidden_dropout_prob
lowerCAmelCase : Optional[Any] = attention_probs_dropout_prob
lowerCAmelCase : List[str] = initializer_range
lowerCAmelCase : Optional[Any] = layer_norm_eps
lowerCAmelCase : Optional[int] = image_size
lowerCAmelCase : Optional[Any] = patch_size
lowerCAmelCase : Tuple = num_channels
lowerCAmelCase : Optional[int] = qkv_bias
lowerCAmelCase : str = encoder_stride
class SCREAMING_SNAKE_CASE__ ( lowercase ):
"""simple docstring"""
a : List[Any] =version.parse("1.11" )
@property
def lowercase__ ( self ):
"""simple docstring"""
return OrderedDict(
[
("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
] )
@property
def lowercase__ ( self ):
"""simple docstring"""
return 1e-4
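# With its defaults the configuration above reproduces the ViT-Base/16
# geometry: 12 layers of hidden size 768 with 12 heads, 16x16 patches on
# 224x224 RGB inputs, i.e. (224 // 16) ** 2 + 1 = 197 tokens including [CLS].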
| 681 | 1 |