| code | code_codestyle | style_context | style_context_codestyle | label |
|---|---|---|---|---|
| string (lengths 86 to 54.5k) | int64 (0 to 371) | string (lengths 87 to 49.2k) | int64 (0 to 349) | int64 (0 or 1) |
import math
import os
from copy import deepcopy
import datasets
import evaluate
import torch
import transformers
from datasets import load_dataset
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer
from accelerate import Accelerator
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import is_tpu_available, set_seed
os.environ["TOKENIZERS_PARALLELISM"] = "true"
def get_basic_setup(accelerator, num_samples=82, batch_size=16):
    "Returns everything needed to perform basic training"
    set_seed(42)
    model = RegressionModel()
    ddp_model = deepcopy(model)
    dset = RegressionDataset(length=num_samples)
    dataloader = DataLoader(dset, batch_size=batch_size)
    model.to(accelerator.device)
    ddp_model, dataloader = accelerator.prepare(ddp_model, dataloader)
    return model, ddp_model, dataloader
def get_dataloader(accelerator: Accelerator, use_longest=False):
    "Builds the tokenized MRPC validation dataloader"
    tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/mrpc-bert-base-cased")
    dataset = load_dataset("glue", "mrpc", split="validation")

    def tokenize_function(examples):
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    with accelerator.main_process_first():
        tokenized_datasets = dataset.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        if use_longest:
            return tokenizer.pad(examples, padding="longest", return_tensors="pt")
        return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")

    return DataLoader(tokenized_datasets, shuffle=False, collate_fn=collate_fn, batch_size=16)
def get_mrpc_setup(dispatch_batches, split_batches):
    "Returns the plain and `accelerator.prepare`-d model/dataloader pairs for MRPC"
    accelerator = Accelerator(dispatch_batches=dispatch_batches, split_batches=split_batches)
    dataloader = get_dataloader(accelerator, not dispatch_batches)
    model = AutoModelForSequenceClassification.from_pretrained(
        "hf-internal-testing/mrpc-bert-base-cased", return_dict=True
    )
    ddp_model, ddp_dataloader = accelerator.prepare(model, dataloader)
    return {"ddp": [ddp_model, ddp_dataloader, "cuda:0"], "no": [model, dataloader, accelerator.device]}, accelerator
def generate_predictions(model, dataloader, accelerator):
    "Runs the model over the dataloader and gathers logits/targets across processes"
    logits_and_targets = []
    for batch in dataloader:
        input, target = batch.values()
        with torch.no_grad():
            logit = model(input)
            logit, target = accelerator.gather_for_metrics((logit, target))
            logits_and_targets.append((logit, target))
    logits, targs = [], []
    for logit, targ in logits_and_targets:
        logits.append(logit)
        targs.append(targ)
    logits, targs = torch.cat(logits), torch.cat(targs)
    return logits, targs
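
# Note: `accelerator.gather_for_metrics`, used above, gathers the per-process tensors and
# drops the samples that were duplicated to pad the last batch across processes, so the
# concatenated predictions line up 1:1 with the original dataset.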
def test_torch_metrics(
    accelerator: Accelerator, num_samples=82, dispatch_batches=False, split_batches=False, batch_size=16
):
    "Checks that `gather_for_metrics` yields exactly `num_samples` predictions"
    model, ddp_model, dataloader = get_basic_setup(accelerator, num_samples, batch_size)
    logits, targs = generate_predictions(ddp_model, dataloader, accelerator)
    assert (
        len(logits) == num_samples
    ), f"""Unexpected number of inputs:\n    Expected: {num_samples}\n    Actual: {len(logits)}"""
def test_mrpc(dispatch_batches: bool = False, split_batches: bool = False):
    "Compares single-process and distributed metric results on MRPC"
    metric = evaluate.load("glue", "mrpc")
    setup, accelerator = get_mrpc_setup(dispatch_batches, split_batches)
    # First do baseline
    model, dataloader, device = setup["no"]
    model.to(device)
    model.eval()
    for batch in dataloader:
        batch.to(device)
        with torch.inference_mode():
            outputs = model(**batch)
        preds = outputs.logits.argmax(dim=-1)
        metric.add_batch(predictions=preds, references=batch["labels"])
    baseline = metric.compute()

    # Then do distributed
    model, dataloader, device = setup["ddp"]
    model.eval()
    for batch in dataloader:
        with torch.inference_mode():
            outputs = model(**batch)
        preds = outputs.logits.argmax(dim=-1)
        references = batch["labels"]
        preds, references = accelerator.gather_for_metrics((preds, references))
        metric.add_batch(predictions=preds, references=references)
    distributed = metric.compute()

    for key in "accuracy f1".split():
        assert math.isclose(
            baseline[key], distributed[key]
        ), f"""Baseline and Distributed are not the same for key {key}:\n\tBaseline: {baseline[key]}\n\tDistributed: {distributed[key]}\n"""
def main():
    accelerator = Accelerator(split_batches=False, dispatch_batches=False)
    if accelerator.is_local_main_process:
        datasets.utils.logging.set_verbosity_warning()
        transformers.utils.logging.set_verbosity_warning()
    else:
        datasets.utils.logging.set_verbosity_error()
        transformers.utils.logging.set_verbosity_error()
    # These are a bit slower so they should only be ran on the GPU or TPU
    if torch.cuda.is_available() or is_tpu_available():
        if accelerator.is_local_main_process:
            print("**Testing gather_for_metrics**")
        for split_batches in [True, False]:
            for dispatch_batches in [True, False]:
                if accelerator.is_local_main_process:
                    print(f"""With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`""")
                test_mrpc(dispatch_batches, split_batches)
                accelerator.state._reset_state()
    if accelerator.is_local_main_process:
        print("**Test torch metrics**")
    for split_batches in [True, False]:
        for dispatch_batches in [True, False]:
            accelerator = Accelerator(split_batches=split_batches, dispatch_batches=dispatch_batches)
            if accelerator.is_local_main_process:
                print(f"""With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`, length=99""")
            test_torch_metrics(accelerator, 99)
            accelerator.state._reset_state()
    if accelerator.is_local_main_process:
        print("**Test last batch is not dropped when perfectly divisible**")
    accelerator = Accelerator()
    test_torch_metrics(accelerator, 512)
    accelerator.state._reset_state()
def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()


if __name__ == "__main__":
    main()
| 43 |
import numpy as np
def power_iteration(input_matrix, vector, error_tol=1e-12, max_iterations=100):
    """
    Power Iteration: find the largest eigenvalue and the corresponding eigenvector of
    `input_matrix`, starting from a vector with a component along the dominant eigenvector.
    The matrix must be real symmetric or complex Hermitian.
    """
    # Ensure matrix is square.
    assert np.shape(input_matrix)[0] == np.shape(input_matrix)[1]
    # Ensure proper dimensionality.
    assert np.shape(input_matrix)[0] == np.shape(vector)[0]
    # Ensure inputs are either both complex or both real
    assert np.iscomplexobj(input_matrix) == np.iscomplexobj(vector)
    is_complex = np.iscomplexobj(input_matrix)
    if is_complex:
        # Ensure complex input_matrix is Hermitian
        assert np.array_equal(input_matrix, input_matrix.conj().T)

    # Set convergence to False. Will define convergence when we exceed max_iterations
    # or when we have small changes from one iteration to next.
    convergence = False
    lambda_previous = 0
    iterations = 0
    error = 1e12

    while not convergence:
        # Multiple matrix by the vector.
        w = np.dot(input_matrix, vector)
        # Normalize the resulting output vector.
        vector = w / np.linalg.norm(w)
        # Find rayleigh quotient
        # (faster than usual b/c we know vector is normalized already)
        vector_h = vector.conj().T if is_complex else vector.T
        lambda_ = np.dot(vector_h, np.dot(input_matrix, vector))

        # Check convergence.
        error = np.abs(lambda_ - lambda_previous) / lambda_
        iterations += 1

        if error <= error_tol or iterations >= max_iterations:
            convergence = True

        lambda_previous = lambda_

    if is_complex:
        lambda_ = np.real(lambda_)

    return lambda_, vector
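
# Illustrative usage: for a symmetric matrix the iteration converges to the dominant
# eigenpair. In the 2x2 example below the eigenvalues are 3 and 1, so:
#
#     eigen_value, eigen_vector = power_iteration(np.array([[2.0, 1.0], [1.0, 2.0]]), np.array([1.0, 0.0]))
#     # eigen_value -> approximately 3.0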
def test_power_iteration():
    real_input_matrix = np.array([[41, 4, 20], [4, 26, 30], [20, 30, 50]])
    real_vector = np.array([41, 4, 20])
    complex_input_matrix = real_input_matrix.astype(np.complex128)
    imag_matrix = np.triu(1j * complex_input_matrix, 1)
    complex_input_matrix += imag_matrix
    complex_input_matrix += -1 * imag_matrix.T
    complex_vector = np.array([41, 4, 20]).astype(np.complex128)

    for problem_type in ["real", "complex"]:
        if problem_type == "real":
            input_matrix = real_input_matrix
            vector = real_vector
        elif problem_type == "complex":
            input_matrix = complex_input_matrix
            vector = complex_vector

        # Our implementation.
        eigen_value, eigen_vector = power_iteration(input_matrix, vector)

        # Numpy implementation.
        # Get eigenvalues and eigenvectors using built-in numpy
        # eigh (eigh used for symmetric or hermetian matrices).
        eigen_values, eigen_vectors = np.linalg.eigh(input_matrix)
        # Last eigenvalue is the maximum one.
        eigen_value_max = eigen_values[-1]
        # Last column in this matrix is eigenvector corresponding to largest eigenvalue.
        eigen_vector_max = eigen_vectors[:, -1]

        # Check our implementation and numpy gives close answers.
        assert np.abs(eigen_value - eigen_value_max) <= 1e-6
        # Take absolute values element wise of each eigenvector,
        # as they are only unique to a minus sign.
        assert np.linalg.norm(np.abs(eigen_vector) - np.abs(eigen_vector_max)) <= 1e-6


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    test_power_iteration()
| 43 | 1 |
import heapq
import sys
import numpy as np
TPos = tuple[int, int]
class PriorityQueue:
    """Min-heap priority queue with membership tracking and in-place priority updates."""

    def __init__(self):
        self.elements = []
        self.set = set()

    def minkey(self):
        if not self.empty():
            return self.elements[0][0]
        else:
            return float("inf")

    def empty(self):
        return len(self.elements) == 0

    def put(self, item, priority):
        if item not in self.set:
            heapq.heappush(self.elements, (priority, item))
            self.set.add(item)
        else:
            # update
            # print("update", item)
            temp = []
            (pri, x) = heapq.heappop(self.elements)
            while x != item:
                temp.append((pri, x))
                (pri, x) = heapq.heappop(self.elements)
            temp.append((priority, item))
            for pro, xxx in temp:
                heapq.heappush(self.elements, (pro, xxx))

    def remove_element(self, item):
        if item in self.set:
            self.set.remove(item)
            temp = []
            (pro, x) = heapq.heappop(self.elements)
            while x != item:
                temp.append((pro, x))
                (pro, x) = heapq.heappop(self.elements)
            for prito, yyy in temp:
                heapq.heappush(self.elements, (prito, yyy))

    def top_show(self):
        return self.elements[0][1]

    def get(self):
        (priority, item) = heapq.heappop(self.elements)
        self.set.remove(item)
        return (priority, item)
def consistent_heuristic(p: TPos, goal: TPos):
    # euclidean distance
    a = np.array(p)
    b = np.array(goal)
    return np.linalg.norm(a - b)


def heuristic_1(p: TPos, goal: TPos):
    # integer division by time variable
    return consistent_heuristic(p, goal) // t


def heuristic_2(p: TPos, goal: TPos):
    # manhattan distance
    return abs(p[0] - goal[0]) + abs(p[1] - goal[1])
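
# Worked examples: consistent_heuristic((0, 0), (3, 4)) == 5.0 (euclidean), while
# heuristic_2((0, 0), (3, 4)) == 7 (manhattan). heuristic_1 integer-divides the euclidean
# distance by the global time counter `t`, so its estimate shrinks as the search proceeds.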
def key(start: TPos, i: int, goal: TPos, g_function: dict[TPos, float]):
    ans = g_function[start] + W1 * heuristics[i](start, goal)
    return ans
def do_something(back_pointer, goal, start):
    """Print the grid, the path found, and exit."""
    grid = np.chararray((n, n))
    for i in range(n):
        for j in range(n):
            grid[i][j] = "*"

    for i in range(n):
        for j in range(n):
            if (j, (n - 1) - i) in blocks:
                grid[i][j] = "#"

    grid[0][(n - 1)] = "-"
    x = back_pointer[goal]
    while x != start:
        (x_c, y_c) = x
        # print(x)
        grid[(n - 1) - y_c][x_c] = "-"
        x = back_pointer[x]
    grid[(n - 1)][0] = "-"

    for i in range(n):
        for j in range(n):
            if (i, j) == (0, n - 1):
                print(grid[i][j], end=" ")
                print("<-- End position", end=" ")
            else:
                print(grid[i][j], end=" ")
        print()
    print("^")
    print("Start position")
    print()
    print("# is an obstacle")
    print("- is the path taken by algorithm")
    print("PATH TAKEN BY THE ALGORITHM IS:-")
    x = back_pointer[goal]
    while x != start:
        print(x, end=" ")
        x = back_pointer[x]
    print(x)
    sys.exit()
def valid(p: TPos):
    if p[0] < 0 or p[0] > n - 1:
        return False
    if p[1] < 0 or p[1] > n - 1:
        return False
    return True
def expand_state(
    s,
    j,
    visited,
    g_function,
    close_list_anchor,
    close_list_inad,
    open_list,
    back_pointer,
):
    for itera in range(n_heuristic):
        open_list[itera].remove_element(s)
    # print("s", s)
    # print("j", j)
    (x, y) = s
    left = (x - 1, y)
    right = (x + 1, y)
    up = (x, y + 1)
    down = (x, y - 1)

    for neighbours in [left, right, up, down]:
        if neighbours not in blocks:
            if valid(neighbours) and neighbours not in visited:
                # print("neighbour", neighbours)
                visited.add(neighbours)
                back_pointer[neighbours] = -1
                g_function[neighbours] = float("inf")

                if valid(neighbours) and g_function[neighbours] > g_function[s] + 1:
                    g_function[neighbours] = g_function[s] + 1
                    back_pointer[neighbours] = s
                if neighbours not in close_list_anchor:
                    open_list[0].put(neighbours, key(neighbours, 0, goal, g_function))
                    if neighbours not in close_list_inad:
                        for var in range(1, n_heuristic):
                            if key(neighbours, var, goal, g_function) <= W2 * key(
                                neighbours, 0, goal, g_function
                            ):
                                open_list[j].put(neighbours, key(neighbours, var, goal, g_function))
def make_common_ground():
    some_list = []
    for x in range(1, 5):
        for y in range(1, 6):
            some_list.append((x, y))

    for x in range(15, 20):
        some_list.append((x, 17))

    for x in range(10, 19):
        for y in range(1, 15):
            some_list.append((x, y))

    # L block
    for x in range(1, 4):
        for y in range(12, 19):
            some_list.append((x, y))
    for x in range(3, 13):
        for y in range(16, 19):
            some_list.append((x, y))
    return some_list
heuristics = {0: consistent_heuristic, 1: heuristic_1, 2: heuristic_2}

blocks_blk = [
    (0, 1),
    (1, 1),
    (2, 1),
    (3, 1),
    (4, 1),
    (5, 1),
    (6, 1),
    (7, 1),
    (8, 1),
    (9, 1),
    (10, 1),
    (11, 1),
    (12, 1),
    (13, 1),
    (14, 1),
    (15, 1),
    (16, 1),
    (17, 1),
    (18, 1),
    (19, 1),
]
blocks_all = make_common_ground()

blocks = blocks_blk
# hyper parameters
W1 = 1
W2 = 1
n = 20
n_heuristic = 3  # one consistent and two other inconsistent

# start and end destination
start = (0, 0)
goal = (n - 1, n - 1)

t = 1
def multi_a_star(start: TPos, goal: TPos, n_heuristic: int):
    g_function = {start: 0, goal: float("inf")}
    back_pointer = {start: -1, goal: -1}
    open_list = []
    visited = set()

    for i in range(n_heuristic):
        open_list.append(PriorityQueue())
        open_list[i].put(start, key(start, i, goal, g_function))

    close_list_anchor: list[int] = []
    close_list_inad: list[int] = []
    while open_list[0].minkey() < float("inf"):
        for i in range(1, n_heuristic):
            # print(open_list[0].minkey(), open_list[i].minkey())
            if open_list[i].minkey() <= W2 * open_list[0].minkey():
                global t
                t += 1
                if g_function[goal] <= open_list[i].minkey():
                    if g_function[goal] < float("inf"):
                        do_something(back_pointer, goal, start)
                else:
                    get_s = open_list[i].top_show()
                    visited.add(get_s)
                    expand_state(
                        get_s,
                        i,
                        visited,
                        g_function,
                        close_list_anchor,
                        close_list_inad,
                        open_list,
                        back_pointer,
                    )
                    close_list_inad.append(get_s)
            else:
                if g_function[goal] <= open_list[0].minkey():
                    if g_function[goal] < float("inf"):
                        do_something(back_pointer, goal, start)
                else:
                    get_s = open_list[0].top_show()
                    visited.add(get_s)
                    expand_state(
                        get_s,
                        0,
                        visited,
                        g_function,
                        close_list_anchor,
                        close_list_inad,
                        open_list,
                        back_pointer,
                    )
                    close_list_anchor.append(get_s)
    print("No path found to goal")
    print()
    for i in range(n - 1, -1, -1):
        for j in range(n):
            if (j, i) in blocks:
                print("#", end=" ")
            elif (j, i) in back_pointer:
                if (j, i) == (n - 1, n - 1):
                    print("*", end=" ")
                else:
                    print("-", end=" ")
            else:
                print("*", end=" ")
            if (j, i) == (n - 1, n - 1):
                print("<-- End position", end=" ")
        print()
    print("^")
    print("Start position")
    print()
    print("# is an obstacle")
    print("- is the path taken by algorithm")


if __name__ == "__main__":
    multi_a_star(start, goal, n_heuristic)

| 306 |
import argparse
from torch import nn
# transformers_old should correspond to branch `save_old_prophetnet_model_structure` here
# original prophetnet_checkpoints are saved under `patrickvonplaten/..._old` respectively
from transformers_old.modeling_prophetnet import (
ProphetNetForConditionalGeneration as ProphetNetForConditionalGenerationOld,
)
from transformers_old.modeling_xlm_prophetnet import (
XLMProphetNetForConditionalGeneration as XLMProphetNetForConditionalGenerationOld,
)
from transformers import ProphetNetForConditionalGeneration, XLMProphetNetForConditionalGeneration, logging
logger = logging.get_logger(__name__)
logging.set_verbosity_info()
def convert_prophetnet_checkpoint_to_pytorch(prophetnet_checkpoint_path: str, pytorch_dump_folder_path: str):
    """
    Copy/paste/tweak prophetnet's weights to our prophetnet structure.
    """
    if "xprophetnet" in prophetnet_checkpoint_path:
        prophet_old = XLMProphetNetForConditionalGenerationOld.from_pretrained(prophetnet_checkpoint_path)
        prophet, loading_info = XLMProphetNetForConditionalGeneration.from_pretrained(
            prophetnet_checkpoint_path, output_loading_info=True
        )
    else:
        prophet_old = ProphetNetForConditionalGenerationOld.from_pretrained(prophetnet_checkpoint_path)
        prophet, loading_info = ProphetNetForConditionalGeneration.from_pretrained(
            prophetnet_checkpoint_path, output_loading_info=True
        )

    special_keys = ["key_proj", "value_proj", "query_proj"]

    mapping = {
        "self_attn": "ngram_self_attn",
        "cross_attn": "encoder_attn",
        "cross_attn_layer_norm": "encoder_attn_layer_norm",
        "feed_forward_layer_norm": "final_layer_norm",
        "feed_forward": "",
        "intermediate": "fc1",
        "output": "fc2",
        "key_proj": "k_proj",
        "query_proj": "q_proj",
        "value_proj": "v_proj",
        "word_embeddings": "embed_tokens",
        "embeddings_layer_norm": "emb_layer_norm",
        "relative_pos_embeddings": "relative_linear",
        "ngram_embeddings": "ngram_input_embed",
        "position_embeddings": "embed_positions",
    }

    for key in loading_info["missing_keys"]:
        attributes = key.split(".")

        if attributes[0] == "lm_head":
            model = prophet
            old_model = prophet_old
        else:
            model = prophet.prophetnet
            old_model = prophet_old.model

        is_key_init = False
        for attribute in attributes:
            if attribute in mapping:
                old_attribute = mapping[attribute]
                if not hasattr(old_model, old_attribute) and len(old_attribute) > 0:
                    old_attribute = attribute
            elif hasattr(old_model, attribute):
                old_attribute = attribute

            if attribute == "weight":
                assert old_model.weight.shape == model.weight.shape, "Shapes have to match!"
                model.weight = old_model.weight
                logger.info(f"{attribute} is initialized.")
                is_key_init = True
                break
            elif attribute == "bias":
                assert old_model.bias.shape == model.bias.shape, "Shapes have to match!"
                model.bias = old_model.bias
                logger.info(f"{attribute} is initialized")
                is_key_init = True
                break
            elif attribute in special_keys and hasattr(old_model, "in_proj_weight"):
                embed_dim = old_model.in_proj_weight.shape[0] // 3
                param = getattr(model, attribute)
                assert param.weight.shape == old_model.in_proj_weight[:embed_dim, :].shape, "Shapes have to match"
                assert param.bias.shape == old_model.in_proj_bias[:embed_dim].shape, "Shapes have to match"
                if attribute == "query_proj":
                    model.query_proj.weight = nn.Parameter(old_model.in_proj_weight[:embed_dim, :])
                    model.query_proj.bias = nn.Parameter(old_model.in_proj_bias[:embed_dim])
                elif attribute == "key_proj":
                    model.key_proj.weight = nn.Parameter(old_model.in_proj_weight[embed_dim : 2 * embed_dim, :])
                    model.key_proj.bias = nn.Parameter(old_model.in_proj_bias[embed_dim : 2 * embed_dim])
                elif attribute == "value_proj":
                    model.value_proj.weight = nn.Parameter(old_model.in_proj_weight[2 * embed_dim :, :])
                    model.value_proj.bias = nn.Parameter(old_model.in_proj_bias[2 * embed_dim :])
                is_key_init = True
                break
            elif attribute == "position_embeddings":
                assert (
                    model.position_embeddings.weight.shape[-1] == old_model.embed_positions.weight.shape[-1]
                ), "Hidden size has to match"
                assert model.position_embeddings.weight.shape[0] == 512, "We want 512 position_embeddings."
                model.position_embeddings.weight = nn.Parameter(old_model.embed_positions.weight[:512, :])
                is_key_init = True
                break

            if attribute.isdigit():
                model = model[int(attribute)]
                old_model = old_model[int(attribute)]
            else:
                model = getattr(model, attribute)

                if old_attribute == "":
                    old_model = old_model
                else:
                    if not hasattr(old_model, old_attribute):
                        raise ValueError(f"{old_model} does not have {old_attribute}")
                    old_model = getattr(old_model, old_attribute)

        if not is_key_init:
            raise ValueError(f"{key} was not correctly initialized!")

    print(f"Saving model to {pytorch_dump_folder_path}")
    prophet.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--prophetnet_checkpoint_path", default=None, type=str, required=True, help="Path the official PyTorch dump."
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )

    args = parser.parse_args()
    convert_prophetnet_checkpoint_to_pytorch(args.prophetnet_checkpoint_path, args.pytorch_dump_folder_path)

| 306 | 1 |
"""simple docstring"""
from __future__ import annotations
from collections.abc import Generator
import requests
from bsa import BeautifulSoup
lowerCamelCase_ : Tuple = 'https://www.indeed.co.in/jobs?q=mobile+app+development&l='
def UpperCAmelCase__ ( _UpperCAmelCase = "mumbai" ):
"""simple docstring"""
A_ : str = BeautifulSoup(requests.get(url + location ).content , 'html.parser' )
# This attribute finds out all the specifics listed in a job
for job in soup.find_all('div' , attrs={'data-tn-component': 'organicJob'} ):
A_ : List[str] = job.find('a' , attrs={'data-tn-element': 'jobTitle'} ).text.strip()
A_ : Optional[Any] = job.find('span' , {'class': 'company'} ).text.strip()
yield job_title, company_name
if __name__ == "__main__":
for i, job in enumerate(fetch_jobs('Bangalore'), 1):
print(F"Job {i:>2} is {job[0]} at {job[1]}") | 286 |
"""simple docstring"""
from arguments import InitializationArguments
from transformers import AutoConfig, AutoModelForCausalLM, AutoTokenizer, HfArgumentParser
# Configuration
lowerCamelCase_ : Any = HfArgumentParser(InitializationArguments)
lowerCamelCase_ : Union[str, Any] = parser.parse_args()
# Load codeparrot tokenizer trained for Python code tokenization
lowerCamelCase_ : List[Any] = AutoTokenizer.from_pretrained(args.tokenizer_name)
# Config: "scale_attn_by_layer_idx" and "reorder_and_upcast_attn" are Mistral stability tweaks
lowerCamelCase_ : Tuple = {
'vocab_size': len(tokenizer),
'scale_attn_by_inverse_layer_idx': True,
'reorder_and_upcast_attn': True,
}
# Load model config (GPT-2 large in this case)
lowerCamelCase_ : int = AutoConfig.from_pretrained(args.config_name, **config_kwargs)
# Initialize new model with config
lowerCamelCase_ : Any = AutoModelForCausalLM.from_config(config)
# Save model to the hub
model.save_pretrained(args.model_name, push_to_hub=args.push_to_hub) | 286 | 1 |
"""simple docstring"""
import os
import time
import pytest
from datasets.utils.filelock import FileLock, Timeout
def a__ ( __SCREAMING_SNAKE_CASE ) -> Optional[int]:
__lowerCAmelCase: Optional[int] = FileLock(str(tmpdir / "foo.lock" ) )
__lowerCAmelCase: str = FileLock(str(tmpdir / "foo.lock" ) )
__lowerCAmelCase: Tuple = 0.01
with locka.acquire():
with pytest.raises(snake_case_ ):
__lowerCAmelCase: int = time.time()
locka.acquire(snake_case_ )
assert time.time() - _start > timeout
def a__ ( __SCREAMING_SNAKE_CASE ) -> Optional[int]:
__lowerCAmelCase: List[str] = "a" * 1_0_0_0 + ".lock"
__lowerCAmelCase: List[Any] = FileLock(str(tmpdir / filename ) )
assert locka._lock_file.endswith(".lock" )
assert not locka._lock_file.endswith(snake_case_ )
assert len(os.path.basename(locka._lock_file ) ) <= 2_5_5
__lowerCAmelCase: Optional[Any] = FileLock(tmpdir / filename )
with locka.acquire():
with pytest.raises(snake_case_ ):
locka.acquire(0 )
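
# A minimal usage sketch of the lock under test (assuming the vendored FileLock keeps the
# standard `filelock` API, where `timeout` can also be passed to the constructor):
#
#     with FileLock("path/to/file.lock", timeout=1):
#         ...  # critical section; a second holder raises Timeout after ~1s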
| 368 |
"""simple docstring"""
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)
BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"salesforce/blip2-opt-2.7b": "https://huggingface.co/salesforce/blip2-opt-2.7b/resolve/main/config.json",
}
class Blip2VisionConfig(PretrainedConfig):
    model_type = "blip_2_vision_model"

    def __init__(
        self,
        hidden_size=1408,
        intermediate_size=6144,
        num_hidden_layers=39,
        num_attention_heads=16,
        image_size=224,
        patch_size=14,
        hidden_act="gelu",
        layer_norm_eps=0.00001,
        attention_dropout=0.0,
        initializer_range=1e-10,
        qkv_bias=True,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act
        self.qkv_bias = qkv_bias

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the vision config dict if we are loading from Blip2Config
        if config_dict.get("model_type") == "blip-2":
            config_dict = config_dict["vision_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)
class Blip2QFormerConfig(PretrainedConfig):
    model_type = "blip_2_qformer"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        cross_attention_frequency=2,
        encoder_hidden_size=1408,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.cross_attention_frequency = cross_attention_frequency
        self.encoder_hidden_size = encoder_hidden_size

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the qformer config dict if we are loading from Blip2Config
        if config_dict.get("model_type") == "blip-2":
            config_dict = config_dict["qformer_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)
class Blip2Config(PretrainedConfig):
    model_type = "blip-2"
    is_composition = True

    def __init__(self, vision_config=None, qformer_config=None, text_config=None, num_query_tokens=32, **kwargs):
        super().__init__(**kwargs)

        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. initializing the Blip2VisionConfig with default values.")

        if qformer_config is None:
            qformer_config = {}
            logger.info("qformer_config is None. Initializing the Blip2QFormerConfig with default values.")

        if text_config is None:
            text_config = {}
            logger.info("text_config is None. Initializing the text config with default values (`OPTConfig`).")

        self.vision_config = Blip2VisionConfig(**vision_config)
        self.qformer_config = Blip2QFormerConfig(**qformer_config)
        text_model_type = text_config["model_type"] if "model_type" in text_config else "opt"
        self.text_config = CONFIG_MAPPING[text_model_type](**text_config)

        self.tie_word_embeddings = self.text_config.tie_word_embeddings
        self.is_encoder_decoder = self.text_config.is_encoder_decoder

        self.num_query_tokens = num_query_tokens
        self.qformer_config.encoder_hidden_size = self.vision_config.hidden_size
        self.use_decoder_only_language_model = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
        self.initializer_factor = 1.0
        self.initializer_range = 0.02

    @classmethod
    def from_vision_qformer_text_configs(
        cls,
        vision_config: Blip2VisionConfig,
        qformer_config: Blip2QFormerConfig,
        text_config: PretrainedConfig,
        **kwargs,
    ):
        """Instantiate a Blip2Config from a vision config, a Q-Former config and a language model config."""
        return cls(
            vision_config=vision_config.to_dict(),
            qformer_config=qformer_config.to_dict(),
            text_config=text_config.to_dict(),
            **kwargs,
        )

    def to_dict(self):
        """Serialize this instance to a Python dictionary, including the nested sub-configs."""
        output = copy.deepcopy(self.__dict__)
        output["vision_config"] = self.vision_config.to_dict()
        output["qformer_config"] = self.qformer_config.to_dict()
        output["text_config"] = self.text_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
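
# Illustrative composition (hypothetical default values): the sub-configs above can be
# combined via the classmethod, e.g.
#
#     config = Blip2Config.from_vision_qformer_text_configs(
#         Blip2VisionConfig(), Blip2QFormerConfig(), CONFIG_MAPPING["opt"]()
#     )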
| 108 | 0 |
import collections
import json
import math
import os
import re
import time
from fnmatch import fnmatch
from typing import Dict
import requests
from slack_sdk import WebClient
client = WebClient(token=os.environ["CI_SLACK_BOT_TOKEN"])
def handle_test_results(test_results):
    expressions = test_results.split(" ")

    failed = 0
    success = 0

    # When the output is short enough, the output is surrounded by = signs: "== OUTPUT =="
    # When it is too long, those signs are not present.
    time_spent = expressions[-2] if "=" in expressions[-1] else expressions[-1]

    for i, expression in enumerate(expressions):
        if "failed" in expression:
            failed += int(expressions[i - 1])
        if "passed" in expression:
            success += int(expressions[i - 1])

    return failed, success, time_spent
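
# For example, a pytest summary such as "= 1 failed, 2 passed in 0:01:01 ="
# yields (1, 2, "0:01:01").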
def extract_first_line_failure(failures_short_lines):
    failures = {}
    file = None
    in_error = False
    for line in failures_short_lines.split("\n"):
        if re.search(r"_ \[doctest\]", line):
            in_error = True
            file = line.split(" ")[2]
        elif in_error and not line.split(" ")[0].isdigit():
            failures[file] = line
            in_error = False

    return failures
class Message:
    def __init__(self, title: str, doc_test_results: Dict):
        self.title = title

        self._time_spent = doc_test_results["time_spent"].split(",")[0]
        self.n_success = doc_test_results["success"]
        self.n_failures = doc_test_results["failures"]
        self.n_tests = self.n_success + self.n_failures

        # Failures and success of the modeling tests
        self.doc_test_results = doc_test_results

    @property
    def time(self) -> str:
        time_spent = [self._time_spent]
        total_secs = 0

        for time in time_spent:
            time_parts = time.split(":")

            # Time can be formatted as xx:xx:xx, as .xx, or as x.xx if the time spent was less than a minute.
            if len(time_parts) == 1:
                time_parts = [0, 0, time_parts[0]]

            hours, minutes, seconds = int(time_parts[0]), int(time_parts[1]), float(time_parts[2])
            total_secs += hours * 3600 + minutes * 60 + seconds

        hours, minutes, seconds = total_secs // 3600, (total_secs % 3600) // 60, total_secs % 60
        return f"{int(hours)}h{int(minutes)}m{int(seconds)}s"

    @property
    def header(self) -> Dict:
        return {"type": "header", "text": {"type": "plain_text", "text": self.title}}

    @property
    def no_failures(self) -> Dict:
        return {
            "type": "section",
            "text": {
                "type": "plain_text",
                "text": f"🌞 There were no failures: all {self.n_tests} tests passed. The suite ran in {self.time}.",
                "emoji": True,
            },
            "accessory": {
                "type": "button",
                "text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
                "url": f"https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}",
            },
        }

    @property
    def failures(self) -> Dict:
        return {
            "type": "section",
            "text": {
                "type": "plain_text",
                "text": (
                    f"There were {self.n_failures} failures, out of {self.n_tests} tests.\nThe suite ran in"
                    f" {self.time}."
                ),
                "emoji": True,
            },
            "accessory": {
                "type": "button",
                "text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
                "url": f"https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}",
            },
        }

    @property
    def category_failures(self) -> Dict:
        line_length = 40
        category_failures = {k: v["failed"] for k, v in doc_test_results.items() if isinstance(v, dict)}

        report = ""
        for category, failures in category_failures.items():
            if len(failures) == 0:
                continue

            if report != "":
                report += "\n\n"

            report += f"*{category} failures*:".ljust(line_length // 2).rjust(line_length // 2) + "\n"
            report += "`"
            report += "`\n`".join(failures)
            report += "`"

        return {
            "type": "section",
            "text": {
                "type": "mrkdwn",
                "text": f"The following examples had failures:\n\n\n{report}\n",
            },
        }

    @property
    def payload(self) -> str:
        blocks = [self.header]

        if self.n_failures > 0:
            blocks.append(self.failures)

        if self.n_failures > 0:
            blocks.extend([self.category_failures])

        if self.n_failures == 0:
            blocks.append(self.no_failures)

        return json.dumps(blocks)

    @staticmethod
    def error_out():
        payload = [
            {
                "type": "section",
                "text": {
                    "type": "plain_text",
                    "text": "There was an issue running the tests.",
                },
                "accessory": {
                    "type": "button",
                    "text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
                    "url": f"https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}",
                },
            }
        ]

        print("Sending the following payload")
        print(json.dumps({"blocks": payload}))

        client.chat_postMessage(
            channel=os.environ["CI_SLACK_CHANNEL_ID_DAILY"],
            text="There was an issue running the tests.",
            blocks=payload,
        )

    def post(self):
        print("Sending the following payload")
        print(json.dumps({"blocks": json.loads(self.payload)}))

        text = f"{self.n_failures} failures out of {self.n_tests} tests," if self.n_failures else "All tests passed."

        self.thread_ts = client.chat_postMessage(
            channel=os.environ["CI_SLACK_CHANNEL_ID_DAILY"],
            blocks=self.payload,
            text=text,
        )

    def get_reply_blocks(self, job_name, job_link, failures, text):
        failures_text = ""
        for key, value in failures.items():
            value = value[:200] + " [Truncated]" if len(value) > 250 else value
            failures_text += f"*{key}*\n_{value}_\n\n"

        title = job_name
        content = {"type": "section", "text": {"type": "mrkdwn", "text": text}}

        if job_link is not None:
            content["accessory"] = {
                "type": "button",
                "text": {"type": "plain_text", "text": "GitHub Action job", "emoji": True},
                "url": job_link,
            }

        return [
            {"type": "header", "text": {"type": "plain_text", "text": title.upper(), "emoji": True}},
            content,
            {"type": "section", "text": {"type": "mrkdwn", "text": failures_text}},
        ]

    def post_reply(self):
        if self.thread_ts is None:
            raise ValueError("Can only post reply if a post has been made.")

        job_link = self.doc_test_results.pop("job_link")
        self.doc_test_results.pop("failures")
        self.doc_test_results.pop("success")
        self.doc_test_results.pop("time_spent")

        sorted_dict = sorted(self.doc_test_results.items(), key=lambda t: t[0])
        for job, job_result in sorted_dict:
            if len(job_result["failures"]):
                text = f"*Num failures* :{len(job_result['failed'])} \n"
                failures = job_result["failures"]

                blocks = self.get_reply_blocks(job, job_link, failures, text=text)

                print("Sending the following reply")
                print(json.dumps({"blocks": blocks}))

                client.chat_postMessage(
                    channel=os.environ["CI_SLACK_CHANNEL_ID_DAILY"],
                    text=f"Results for {job}",
                    blocks=blocks,
                    thread_ts=self.thread_ts["ts"],
                )

                time.sleep(1)
def get_job_links():
    run_id = os.environ["GITHUB_RUN_ID"]
    url = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{run_id}/jobs?per_page=100"
    result = requests.get(url).json()
    jobs = {}

    try:
        jobs.update({job["name"]: job["html_url"] for job in result["jobs"]})
        pages_to_iterate_over = math.ceil((result["total_count"] - 100) / 100)

        for i in range(pages_to_iterate_over):
            result = requests.get(url + f"&page={i + 2}").json()
            jobs.update({job["name"]: job["html_url"] for job in result["jobs"]})

        return jobs
    except Exception as e:
        print("Unknown error, could not fetch links.", e)

    return {}


def retrieve_artifact(name: str):
    _artifact = {}

    if os.path.exists(name):
        files = os.listdir(name)
        for file in files:
            try:
                with open(os.path.join(name, file), encoding="utf-8") as f:
                    _artifact[file.split(".")[0]] = f.read()
            except UnicodeDecodeError as e:
                raise ValueError(f"Could not open {os.path.join(name, file)}.") from e

    return _artifact


def retrieve_available_artifacts():
    class Artifact:
        def __init__(self, name: str):
            self.name = name
            self.paths = []

        def __str__(self):
            return self.name

        def add_path(self, path: str):
            self.paths.append({"name": self.name, "path": path})

    _available_artifacts: Dict[str, Artifact] = {}

    directories = filter(os.path.isdir, os.listdir())
    for directory in directories:
        artifact_name = directory
        if artifact_name not in _available_artifacts:
            _available_artifacts[artifact_name] = Artifact(artifact_name)

        _available_artifacts[artifact_name].add_path(directory)

    return _available_artifacts
if __name__ == "__main__":
    github_actions_job_links = get_job_links()
    available_artifacts = retrieve_available_artifacts()

    docs = collections.OrderedDict(
        [
            ("*.py", "API Examples"),
            ("*.md", "MD Examples"),
        ]
    )

    # This dict will contain all the information relative to each doc test category:
    # - failed: list of failed tests
    # - failures: dict in the format 'test': 'error_message'
    doc_test_results = {
        v: {
            "failed": [],
            "failures": {},
        }
        for v in docs.values()
    }

    # Link to the GitHub Action job
    doc_test_results["job_link"] = github_actions_job_links.get("run_doctests")

    artifact_path = available_artifacts["doc_tests_gpu_test_reports"].paths[0]
    artifact = retrieve_artifact(artifact_path["name"])
    if "stats" in artifact:
        failed, success, time_spent = handle_test_results(artifact["stats"])
        doc_test_results["failures"] = failed
        doc_test_results["success"] = success
        doc_test_results["time_spent"] = time_spent[1:-1] + ", "

        all_failures = extract_first_line_failure(artifact["failures_short"])
        for line in artifact["summary_short"].split("\n"):
            if re.search("FAILED", line):
                line = line.replace("FAILED ", "")
                line = line.split()[0].replace("\n", "")

                if "::" in line:
                    file_path, test = line.split("::")
                else:
                    file_path, test = line, line

                for file_regex in docs.keys():
                    if fnmatch(file_path, file_regex):
                        category = docs[file_regex]
                        doc_test_results[category]["failed"].append(test)

                        failure = all_failures[test] if test in all_failures else "N/A"
                        doc_test_results[category]["failures"][test] = failure

                        break

    message = Message("🤗 Results of the doc tests.", doc_test_results)
    message.post()
    message.post_reply()
| 35 |
"""simple docstring"""
import math
from enum import Enum
from typing import Optional, Union
from torch.optim import Optimizer
from torch.optim.lr_scheduler import LambdaLR
from .utils import logging
lowercase__ : Optional[Any] = logging.get_logger(__name__)
class UpperCamelCase__ ( lowercase_ ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE = """linear"""
_SCREAMING_SNAKE_CASE = """cosine"""
_SCREAMING_SNAKE_CASE = """cosine_with_restarts"""
_SCREAMING_SNAKE_CASE = """polynomial"""
_SCREAMING_SNAKE_CASE = """constant"""
_SCREAMING_SNAKE_CASE = """constant_with_warmup"""
_SCREAMING_SNAKE_CASE = """piecewise_constant"""
def get_constant_schedule(optimizer: Optimizer, last_epoch: int = -1):
    """Create a schedule with a constant learning rate."""
    return LambdaLR(optimizer, lambda _: 1, last_epoch=last_epoch)


def get_constant_schedule_with_warmup(optimizer: Optimizer, num_warmup_steps: int, last_epoch: int = -1):
    """Create a schedule with a constant learning rate preceded by a linear warmup."""

    def lr_lambda(current_step: int):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1.0, num_warmup_steps))
        return 1.0

    return LambdaLR(optimizer, lr_lambda, last_epoch=last_epoch)


def get_piecewise_constant_schedule(optimizer: Optimizer, step_rules: str, last_epoch: int = -1):
    """Create a schedule with learning-rate multipliers that change at the steps encoded in `step_rules`."""
    rules_dict = {}
    rule_list = step_rules.split(",")
    for rule_str in rule_list[:-1]:
        value_str, lr_str = rule_str.split(":")
        steps = int(value_str)
        value = float(lr_str)
        rules_dict[steps] = value
    last_lr_multiple = float(rule_list[-1])

    def create_rules_function(rules_dict, last_lr_multiple):
        def rule_func(steps: int) -> float:
            sorted_steps = sorted(rules_dict.keys())
            for i, sorted_step in enumerate(sorted_steps):
                if steps < sorted_step:
                    return rules_dict[sorted_steps[i]]
            return last_lr_multiple

        return rule_func

    rules_func = create_rules_function(rules_dict, last_lr_multiple)

    return LambdaLR(optimizer, rules_func, last_epoch=last_epoch)
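
# Illustrative `step_rules` string: "1:0.1,10:0.5,1.0" maps steps below 1 to a 0.1
# multiplier, steps below 10 to 0.5, and every later step to the trailing 1.0.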
def get_linear_schedule_with_warmup(optimizer, num_warmup_steps, num_training_steps, last_epoch=-1):
    """Create a schedule with a linear warmup followed by a linear decay to 0."""

    def lr_lambda(current_step: int):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        return max(
            0.0, float(num_training_steps - current_step) / float(max(1, num_training_steps - num_warmup_steps))
        )

    return LambdaLR(optimizer, lr_lambda, last_epoch)


def get_cosine_schedule_with_warmup(
    optimizer: Optimizer, num_warmup_steps: int, num_training_steps: int, num_cycles: float = 0.5, last_epoch: int = -1
):
    """Create a schedule with a linear warmup followed by a cosine decay."""

    def lr_lambda(current_step):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        progress = float(current_step - num_warmup_steps) / float(max(1, num_training_steps - num_warmup_steps))
        return max(0.0, 0.5 * (1.0 + math.cos(math.pi * float(num_cycles) * 2.0 * progress)))

    return LambdaLR(optimizer, lr_lambda, last_epoch)


def get_cosine_with_hard_restarts_schedule_with_warmup(
    optimizer: Optimizer, num_warmup_steps: int, num_training_steps: int, num_cycles: int = 1, last_epoch: int = -1
):
    """Create a schedule with a linear warmup followed by a cosine decay with hard restarts."""

    def lr_lambda(current_step):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        progress = float(current_step - num_warmup_steps) / float(max(1, num_training_steps - num_warmup_steps))
        if progress >= 1.0:
            return 0.0
        return max(0.0, 0.5 * (1.0 + math.cos(math.pi * ((float(num_cycles) * progress) % 1.0))))

    return LambdaLR(optimizer, lr_lambda, last_epoch)


def get_polynomial_decay_schedule_with_warmup(
    optimizer, num_warmup_steps, num_training_steps, lr_end=1e-7, power=1.0, last_epoch=-1
):
    """Create a schedule with a linear warmup followed by a polynomial decay from the initial lr to `lr_end`."""
    lr_init = optimizer.defaults["lr"]
    if not (lr_init > lr_end):
        raise ValueError(f"lr_end ({lr_end}) must be be smaller than initial lr ({lr_init})")

    def lr_lambda(current_step: int):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        elif current_step > num_training_steps:
            return lr_end / lr_init  # as LambdaLR multiplies by lr_init
        else:
            lr_range = lr_init - lr_end
            decay_steps = num_training_steps - num_warmup_steps
            pct_remaining = 1 - (current_step - num_warmup_steps) / decay_steps
            decay = lr_range * pct_remaining**power + lr_end
            return decay / lr_init  # as LambdaLR multiplies by lr_init

    return LambdaLR(optimizer, lr_lambda, last_epoch)


TYPE_TO_SCHEDULER_FUNCTION = {
    SchedulerType.LINEAR: get_linear_schedule_with_warmup,
    SchedulerType.COSINE: get_cosine_schedule_with_warmup,
    SchedulerType.COSINE_WITH_RESTARTS: get_cosine_with_hard_restarts_schedule_with_warmup,
    SchedulerType.POLYNOMIAL: get_polynomial_decay_schedule_with_warmup,
    SchedulerType.CONSTANT: get_constant_schedule,
    SchedulerType.CONSTANT_WITH_WARMUP: get_constant_schedule_with_warmup,
    SchedulerType.PIECEWISE_CONSTANT: get_piecewise_constant_schedule,
}
def get_scheduler(
    name: Union[str, SchedulerType],
    optimizer: Optimizer,
    step_rules: Optional[str] = None,
    num_warmup_steps: Optional[int] = None,
    num_training_steps: Optional[int] = None,
    num_cycles: int = 1,
    power: float = 1.0,
    last_epoch: int = -1,
):
    """Unified helper to build any of the schedules above from its name."""
    name = SchedulerType(name)
    schedule_func = TYPE_TO_SCHEDULER_FUNCTION[name]
    if name == SchedulerType.CONSTANT:
        return schedule_func(optimizer, last_epoch=last_epoch)

    if name == SchedulerType.PIECEWISE_CONSTANT:
        return schedule_func(optimizer, step_rules=step_rules, last_epoch=last_epoch)

    # All other schedulers require `num_warmup_steps`
    if num_warmup_steps is None:
        raise ValueError(f"{name} requires `num_warmup_steps`, please provide that argument.")

    if name == SchedulerType.CONSTANT_WITH_WARMUP:
        return schedule_func(optimizer, num_warmup_steps=num_warmup_steps, last_epoch=last_epoch)

    # All other schedulers require `num_training_steps`
    if num_training_steps is None:
        raise ValueError(f"{name} requires `num_training_steps`, please provide that argument.")

    if name == SchedulerType.COSINE_WITH_RESTARTS:
        return schedule_func(
            optimizer,
            num_warmup_steps=num_warmup_steps,
            num_training_steps=num_training_steps,
            num_cycles=num_cycles,
            last_epoch=last_epoch,
        )

    if name == SchedulerType.POLYNOMIAL:
        return schedule_func(
            optimizer,
            num_warmup_steps=num_warmup_steps,
            num_training_steps=num_training_steps,
            power=power,
            last_epoch=last_epoch,
        )

    return schedule_func(
        optimizer, num_warmup_steps=num_warmup_steps, num_training_steps=num_training_steps, last_epoch=last_epoch
    )
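
# A minimal usage sketch (assuming `optimizer` is an already-built torch optimizer):
#
#     lr_scheduler = get_scheduler(
#         "cosine", optimizer, num_warmup_steps=500, num_training_steps=10_000
#     )
#     # then call lr_scheduler.step() once per optimization step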
| 224 | 0 |
import argparse
from pathlib import Path
from typing import Dict, OrderedDict, Tuple
import torch
from audiocraft.models import MusicGen
from transformers import (
AutoFeatureExtractor,
AutoTokenizer,
EncodecModel,
MusicgenDecoderConfig,
MusicgenForConditionalGeneration,
MusicgenProcessor,
T5EncoderModel,
)
from transformers.models.musicgen.modeling_musicgen import MusicgenForCausalLM
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
EXPECTED_MISSING_KEYS = ["model.decoder.embed_positions.weights"]
def rename_keys(name):
    """Map a fairseq MusicGen parameter name to its Hugging Face equivalent."""
    if "emb" in name:
        name = name.replace("emb", "model.decoder.embed_tokens")
    if "transformer" in name:
        name = name.replace("transformer", "model.decoder")
    if "cross_attention" in name:
        name = name.replace("cross_attention", "encoder_attn")
    if "linear1" in name:
        name = name.replace("linear1", "fc1")
    if "linear2" in name:
        name = name.replace("linear2", "fc2")
    if "norm1" in name:
        name = name.replace("norm1", "self_attn_layer_norm")
    if "norm_cross" in name:
        name = name.replace("norm_cross", "encoder_attn_layer_norm")
    if "norm2" in name:
        name = name.replace("norm2", "final_layer_norm")
    if "out_norm" in name:
        name = name.replace("out_norm", "model.decoder.layer_norm")
    if "linears" in name:
        name = name.replace("linears", "lm_heads")
    if "condition_provider.conditioners.description.output_proj" in name:
        name = name.replace("condition_provider.conditioners.description.output_proj", "enc_to_dec_proj")
    return name
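
# For example, a fairseq key such as "transformer.layers.0.linear1.weight" is renamed
# to "model.decoder.layers.0.fc1.weight" by the substitutions above.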
def rename_state_dict(state_dict: OrderedDict, hidden_size: int) -> Tuple[Dict, Dict]:
    """Rename fairseq MusicGen keys to HF names and split off the enc-dec projection weights."""
    keys = list(state_dict.keys())
    enc_dec_proj_state_dict = {}
    for key in keys:
        val = state_dict.pop(key)
        key = rename_keys(key)
        if "in_proj_weight" in key:
            # split fused qkv proj
            state_dict[key.replace("in_proj_weight", "q_proj.weight")] = val[:hidden_size, :]
            state_dict[key.replace("in_proj_weight", "k_proj.weight")] = val[hidden_size : 2 * hidden_size, :]
            state_dict[key.replace("in_proj_weight", "v_proj.weight")] = val[-hidden_size:, :]
        elif "enc_to_dec_proj" in key:
            enc_dec_proj_state_dict[key[len("enc_to_dec_proj.") :]] = val
        else:
            state_dict[key] = val
    return state_dict, enc_dec_proj_state_dict
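
# The fused attention projection is split row-wise: for an `in_proj_weight` of shape
# (3 * hidden_size, hidden_size), rows [0, h) become q_proj, [h, 2h) k_proj, and
# [2h, 3h) v_proj, matching the slicing above.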
def decoder_config_from_checkpoint(checkpoint: str) -> MusicgenDecoderConfig:
    if checkpoint == "small":
        # default config values
        hidden_size = 1024
        num_hidden_layers = 24
        num_attention_heads = 16
    elif checkpoint == "medium":
        hidden_size = 1536
        num_hidden_layers = 48
        num_attention_heads = 24
    elif checkpoint == "large":
        hidden_size = 2048
        num_hidden_layers = 48
        num_attention_heads = 32
    else:
        raise ValueError(f"Checkpoint should be one of `['small', 'medium', 'large']`, got {checkpoint}.")
    config = MusicgenDecoderConfig(
        hidden_size=hidden_size,
        ffn_dim=hidden_size * 4,
        num_hidden_layers=num_hidden_layers,
        num_attention_heads=num_attention_heads,
    )
    return config
@torch.no_grad()
def convert_musicgen_checkpoint(checkpoint, pytorch_dump_folder=None, repo_id=None, device="cpu"):
    fairseq_model = MusicGen.get_pretrained(checkpoint, device=device)
    decoder_config = decoder_config_from_checkpoint(checkpoint)

    decoder_state_dict = fairseq_model.lm.state_dict()
    decoder_state_dict, enc_dec_proj_state_dict = rename_state_dict(
        decoder_state_dict, hidden_size=decoder_config.hidden_size
    )

    text_encoder = T5EncoderModel.from_pretrained("t5-base")
    audio_encoder = EncodecModel.from_pretrained("facebook/encodec_32khz")
    decoder = MusicgenForCausalLM(decoder_config).eval()

    # load all decoder weights - expect that we'll be missing embeddings and enc-dec projection
    missing_keys, unexpected_keys = decoder.load_state_dict(decoder_state_dict, strict=False)

    for key in missing_keys.copy():
        if key.startswith(("text_encoder", "audio_encoder")) or key in EXPECTED_MISSING_KEYS:
            missing_keys.remove(key)

    if len(missing_keys) > 0:
        raise ValueError(f"Missing key(s) in state_dict: {missing_keys}")

    if len(unexpected_keys) > 0:
        raise ValueError(f"Unexpected key(s) in state_dict: {unexpected_keys}")

    # init the composite model
    model = MusicgenForConditionalGeneration(text_encoder=text_encoder, audio_encoder=audio_encoder, decoder=decoder)

    # load the pre-trained enc-dec projection (from the decoder state dict)
    model.enc_to_dec_proj.load_state_dict(enc_dec_proj_state_dict)

    # check we can do a forward pass
    input_ids = torch.arange(0, 8, dtype=torch.long).reshape(2, -1)
    decoder_input_ids = input_ids.reshape(2 * 4, -1)

    with torch.no_grad():
        logits = model(input_ids=input_ids, decoder_input_ids=decoder_input_ids).logits

    if logits.shape != (8, 1, 2048):
        raise ValueError("Incorrect shape for logits")

    # now construct the processor
    tokenizer = AutoTokenizer.from_pretrained("t5-base")
    feature_extractor = AutoFeatureExtractor.from_pretrained("facebook/encodec_32khz", padding_side="left")

    processor = MusicgenProcessor(feature_extractor=feature_extractor, tokenizer=tokenizer)

    # set the appropriate bos/pad token ids
    model.generation_config.decoder_start_token_id = 2048
    model.generation_config.pad_token_id = 2048

    # set other default generation config params
    model.generation_config.max_length = int(30 * audio_encoder.config.frame_rate)
    model.generation_config.do_sample = True
    model.generation_config.guidance_scale = 3.0

    if pytorch_dump_folder is not None:
        Path(pytorch_dump_folder).mkdir(exist_ok=True)
        logger.info(f"Saving model {checkpoint} to {pytorch_dump_folder}")
        model.save_pretrained(pytorch_dump_folder)
        processor.save_pretrained(pytorch_dump_folder)

    if repo_id:
        logger.info(f"Pushing model {checkpoint} to {repo_id}")
        model.push_to_hub(repo_id)
        processor.push_to_hub(repo_id)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--checkpoint""",
default="""small""",
type=str,
help="""Checkpoint size of the MusicGen model you'd like to convert. Can be one of: `['small', 'medium', 'large']`.""",
)
parser.add_argument(
"""--pytorch_dump_folder""",
required=True,
default=None,
type=str,
help="""Path to the output PyTorch model directory.""",
)
parser.add_argument(
"""--push_to_hub""", default=None, type=str, help="""Where to upload the converted model on the 🤗 hub."""
)
parser.add_argument(
"""--device""", default="""cpu""", type=str, help="""Torch device to run the conversion, either cpu or cuda."""
)
    args = parser.parse_args()
    convert_musicgen_checkpoint(args.checkpoint, args.pytorch_dump_folder, args.push_to_hub, args.device)
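# Illustrative invocation (the script filename below is an assumption, not stated in this file):
#   python convert_musicgen_transformers.py --checkpoint small \
#       --pytorch_dump_folder ./musicgen-small --device cpu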
| 35 |
import warnings
from ...utils import logging
from .image_processing_clip import CLIPImageProcessor
logger = logging.get_logger(__name__)


class CLIPFeatureExtractor(CLIPImageProcessor):
    def __init__(self, *args, **kwargs):
        warnings.warn(
            "The class CLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use CLIPImageProcessor instead.", FutureWarning, )
        super().__init__(*args, **kwargs)
| 35 | 1 |
'''simple docstring'''
encode_dict = {
'a': 'AAAAA',
'b': 'AAAAB',
'c': 'AAABA',
'd': 'AAABB',
'e': 'AABAA',
'f': 'AABAB',
'g': 'AABBA',
'h': 'AABBB',
'i': 'ABAAA',
'j': 'BBBAA',
'k': 'ABAAB',
'l': 'ABABA',
'm': 'ABABB',
'n': 'ABBAA',
'o': 'ABBAB',
'p': 'ABBBA',
'q': 'ABBBB',
'r': 'BAAAA',
's': 'BAAAB',
't': 'BAABA',
'u': 'BAABB',
'v': 'BBBAB',
'w': 'BABAA',
'x': 'BABAB',
'y': 'BABBA',
'z': 'BABBB',
' ': ' ',
}
decode_dict = {value: key for key, value in encode_dict.items()}


def encode(word: str) -> str:
    encoded = ""
    for letter in word.lower():
        if letter.isalpha() or letter == " ":
            encoded += encode_dict[letter]
        else:
            raise Exception("encode() accepts only letters of the alphabet and spaces")
    return encoded


def decode(coded: str) -> str:
    if set(coded) - {"A", "B", " "} != set():
        raise Exception("decode() accepts only 'A', 'B' and spaces")
    decoded = ""
    for word in coded.split():
        while len(word) != 0:
            decoded += decode_dict[word[:5]]
            word = word[5:]
        decoded += " "
    return decoded.strip()
if __name__ == "__main__":
from doctest import testmod
testmod()
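# Worked round trip (illustrative, using the table above):
#   encode("hello") -> "AABBBAABAAABABAABABAABBAB"
#   decode(encode("hello")) -> "hello"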
| 34 |
'''simple docstring'''
import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple
from transformers import AddedToken, BatchEncoding, PerceiverTokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
if is_torch_available():
    FRAMEWORK = "pt"
elif is_tf_available():
    FRAMEWORK = "tf"
else:
    FRAMEWORK = "jax"
class PerceiverTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = PerceiverTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()
        tokenizer = PerceiverTokenizer()
        tokenizer.save_pretrained(self.tmpdirname)

    @cached_property
    def perceiver_tokenizer(self):
        return PerceiverTokenizer.from_pretrained("deepmind/language-perceiver")

    def get_tokenizer(self, **kwargs) -> PerceiverTokenizer:
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)
    def get_clean_sequence(self, tokenizer, with_prefix_space=False, max_length=20, min_length=5) -> Tuple[str, list]:
        toks = []
        for i in range(len(tokenizer)):
            try:
                tok = tokenizer.decode([i], clean_up_tokenization_spaces=False)
            except UnicodeDecodeError:
                pass
            toks.append((i, tok))
        toks = list(filter(lambda t: re.match(r"^[ a-zA-Z]+$", t[1]), toks))
        toks = list(filter(lambda t: [t[0]] == tokenizer.encode(t[1], add_special_tokens=False), toks))
        if max_length is not None and len(toks) > max_length:
            toks = toks[:max_length]
        if min_length is not None and len(toks) < min_length and len(toks) > 0:
            while len(toks) < min_length:
                toks = toks + toks
        # toks_str = [t[1] for t in toks]
        toks_ids = [t[0] for t in toks]
        # Ensure consistency
        output_txt = tokenizer.decode(toks_ids, clean_up_tokenization_spaces=False)
        if " " not in output_txt and len(toks_ids) > 1:
            output_txt = (
                tokenizer.decode([toks_ids[0]], clean_up_tokenization_spaces=False)
                + " "
                + tokenizer.decode(toks_ids[1:], clean_up_tokenization_spaces=False)
            )
        if with_prefix_space:
            output_txt = " " + output_txt
        output_ids = tokenizer.encode(output_txt, add_special_tokens=False)
        return output_txt, output_ids
    def test_multibytes_char(self):
        tokenizer = self.perceiver_tokenizer
        src_text = "Unicode €."
        encoded = tokenizer(src_text)
        encoded_ids = [4, 91, 116, 111, 105, 117, 106, 107, 38, 232, 136, 178, 52, 5]
        self.assertEqual(encoded["input_ids"], encoded_ids)
        # decoding
        decoded = tokenizer.decode(encoded_ids)
        self.assertEqual(decoded, "[CLS]Unicode €.[SEP]")
        encoded = tokenizer("e è é ê ë")
        encoded_ids = [4, 107, 38, 201, 174, 38, 201, 175, 38, 201, 176, 38, 201, 177, 5]
        self.assertEqual(encoded["input_ids"], encoded_ids)
        # decoding
        decoded = tokenizer.decode(encoded_ids)
        self.assertEqual(decoded, "[CLS]e è é ê ë[SEP]")
        # encode/decode, but with `encode` instead of `__call__`
        self.assertEqual(tokenizer.decode(tokenizer.encode("e è é ê ë")), "[CLS]e è é ê ë[SEP]")
    def test_prepare_batch_integration(self):
        tokenizer = self.perceiver_tokenizer
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        # fmt: off
        expected_src_tokens = [4, 71, 38, 114, 117, 116, 109, 38, 118, 103, 120, 103, 109, 120, 103, 118, 110, 38, 108, 117, 120, 38, 121, 123, 115, 115, 103, 120, 111, 128, 103, 122, 111, 117, 116, 52, 5, 0]
        # fmt: on
        batch = tokenizer(src_text, padding=True, return_tensors=FRAMEWORK)
        self.assertIsInstance(batch, BatchEncoding)
        if FRAMEWORK != "jax":
            result = list(batch.input_ids.numpy()[0])
        else:
            result = list(batch.input_ids.tolist()[0])
        self.assertListEqual(expected_src_tokens, result)
        self.assertEqual((2, 38), batch.input_ids.shape)
        self.assertEqual((2, 38), batch.attention_mask.shape)
    def test_empty_target_text(self):
        tokenizer = self.perceiver_tokenizer
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        batch = tokenizer(src_text, padding=True, return_tensors=FRAMEWORK)
        # check if input_ids are returned and no decoder_input_ids
        self.assertIn("input_ids", batch)
        self.assertIn("attention_mask", batch)
        self.assertNotIn("decoder_input_ids", batch)
        self.assertNotIn("decoder_attention_mask", batch)
    def test_max_length_integration(self):
        tokenizer = self.perceiver_tokenizer
        tgt_text = [
            "Summary of the text.",
            "Another summary.",
        ]
        targets = tokenizer(
            text_target=tgt_text, max_length=32, padding="max_length", truncation=True, return_tensors=FRAMEWORK)
        self.assertEqual(32, targets["input_ids"].shape[1])
    def test_save_and_load_tokenizer(self):
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                self.assertNotEqual(tokenizer.model_max_length, 42)
        # Now let's start the test
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                # Isolate this from the other tests because we save additional tokens/etc
                tmpdirname = tempfile.mkdtemp()
                sample_text = " He is very happy, UNwant\u00E9d,running"
                before_tokens = tokenizer.encode(sample_text, add_special_tokens=False)
                tokenizer.save_pretrained(tmpdirname)
                after_tokenizer = tokenizer.__class__.from_pretrained(tmpdirname)
                after_tokens = after_tokenizer.encode(sample_text, add_special_tokens=False)
                self.assertListEqual(before_tokens, after_tokens)
                shutil.rmtree(tmpdirname)
        tokenizers = self.get_tokenizers(model_max_length=42)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                # Isolate this from the other tests because we save additional tokens/etc
                tmpdirname = tempfile.mkdtemp()
                sample_text = " He is very happy, UNwant\u00E9d,running"
                tokenizer.add_tokens(["bim", "bambam"])
                additional_special_tokens = tokenizer.additional_special_tokens
                additional_special_tokens.append("new_additional_special_token")
                tokenizer.add_special_tokens({"additional_special_tokens": additional_special_tokens})
                before_tokens = tokenizer.encode(sample_text, add_special_tokens=False)
                tokenizer.save_pretrained(tmpdirname)
                after_tokenizer = tokenizer.__class__.from_pretrained(tmpdirname)
                after_tokens = after_tokenizer.encode(sample_text, add_special_tokens=False)
                self.assertListEqual(before_tokens, after_tokens)
                self.assertIn("new_additional_special_token", after_tokenizer.additional_special_tokens)
                self.assertEqual(after_tokenizer.model_max_length, 42)
                tokenizer = tokenizer.__class__.from_pretrained(tmpdirname, model_max_length=43)
                self.assertEqual(tokenizer.model_max_length, 43)
                shutil.rmtree(tmpdirname)
    def test_special_tokens_initialization_with_non_empty_additional_special_tokens(self):
        tokenizer_list = []
        if self.test_slow_tokenizer:
            tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()))
        if self.test_rust_tokenizer:
            tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()))
        for tokenizer_class, tokenizer_utils in tokenizer_list:
            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer_utils.save_pretrained(tmp_dir)
                with open(os.path.join(tmp_dir, "special_tokens_map.json"), encoding="utf-8") as json_file:
                    special_tokens_map = json.load(json_file)
                with open(os.path.join(tmp_dir, "tokenizer_config.json"), encoding="utf-8") as json_file:
                    tokenizer_config = json.load(json_file)
                added_tokens_extra_ids = [f"<extra_id_{i}>" for i in range(125)]
                special_tokens_map["additional_special_tokens"] = added_tokens_extra_ids + [
                    "an_additional_special_token"
                ]
                tokenizer_config["additional_special_tokens"] = added_tokens_extra_ids + [
                    "an_additional_special_token"
                ]
                with open(os.path.join(tmp_dir, "special_tokens_map.json"), "w", encoding="utf-8") as outfile:
                    json.dump(special_tokens_map, outfile)
                with open(os.path.join(tmp_dir, "tokenizer_config.json"), "w", encoding="utf-8") as outfile:
                    json.dump(tokenizer_config, outfile)
                # the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
                # into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
                # "special_tokens_map.json" files
                tokenizer_without_change_in_init = tokenizer_class.from_pretrained(tmp_dir)
                self.assertIn(
                    "an_additional_special_token", tokenizer_without_change_in_init.additional_special_tokens)
                self.assertEqual(
                    ["an_additional_special_token"],
                    tokenizer_without_change_in_init.convert_ids_to_tokens(
                        tokenizer_without_change_in_init.convert_tokens_to_ids(["an_additional_special_token"])), )
                # Now we test that we can change the value of additional_special_tokens in the from_pretrained
                new_added_tokens = added_tokens_extra_ids + [AddedToken("a_new_additional_special_token", lstrip=True)]
                tokenizer = tokenizer_class.from_pretrained(
                    tmp_dir, additional_special_tokens=new_added_tokens, )
                self.assertIn("a_new_additional_special_token", tokenizer.additional_special_tokens)
                self.assertEqual(
                    ["a_new_additional_special_token"],
                    tokenizer.convert_ids_to_tokens(
                        tokenizer.convert_tokens_to_ids(["a_new_additional_special_token"])), )
    def test_decode_invalid_byte_id(self):
        tokenizer = self.perceiver_tokenizer
        self.assertEqual(tokenizer.decode([178]), "�")

    # tokenizer can be instantiated without any pretrained files, so no need for pretrained tokenizer list
    def test_pretrained_model_lists(self):
        pass

    # tokenizer does not have vocabulary
    def test_get_vocab(self):
        pass

    # inputs cannot be pretokenized
    def test_pretokenized_inputs(self):
        pass

    # tests all ids in vocab => vocab doesn't exist so unnecessary to test
    def test_conversion_reversible(self):
        pass

    def test_convert_tokens_to_string_format(self):
        tokenizers = self.get_tokenizers(fast=True, do_lower_case=True)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                tokens = ["[CLS]", "t", "h", "i", "s", " ", "i", "s", " ", "a", " ", "t", "e", "s", "t", "[SEP]"]
                string = tokenizer.convert_tokens_to_string(tokens)
                self.assertIsInstance(string, str)
| 34 | 1 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_camembert import CamembertTokenizer
else:
    CamembertTokenizer = None
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "camembert-base": "https://huggingface.co/camembert-base/resolve/main/sentencepiece.bpe.model",
    },
    "tokenizer_file": {
        "camembert-base": "https://huggingface.co/camembert-base/resolve/main/tokenizer.json",
    },
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "camembert-base": 512,
}
SPIECE_UNDERLINE = "▁"
class CamembertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = CamembertTokenizer

    def __init__(self, vocab_file=None, tokenizer_file=None, bos_token="<s>", eos_token="</s>", sep_token="</s>", cls_token="<s>", unk_token="<unk>", pad_token="<pad>", mask_token="<mask>", additional_special_tokens=["<s>NOTUSED", "</s>NOTUSED"], **kwargs, ):
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        super().__init__(
            vocab_file, tokenizer_file=tokenizer_file, bos_token=bos_token, eos_token=eos_token, sep_token=sep_token, cls_token=cls_token, unk_token=unk_token, pad_token=pad_token, mask_token=mask_token, additional_special_tokens=additional_special_tokens, **kwargs, )
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer.")
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
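# Minimal usage sketch (illustrative; downloads "camembert-base" from the Hub):
#   tokenizer = CamembertTokenizerFast.from_pretrained("camembert-base")
#   tokenizer("Le camembert est délicieux !")["input_ids"]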
| 367 |
import itertools
from dataclasses import dataclass
from typing import Optional
import pandas as pd
import pyarrow as pa
import datasets
from datasets.table import table_cast
@dataclass
class PandasConfig(datasets.BuilderConfig):
    features: Optional[datasets.Features] = None


class Pandas(datasets.ArrowBasedBuilder):
    BUILDER_CONFIG_CLASS = PandasConfig

    def _info(self):
        return datasets.DatasetInfo(features=self.config.features)

    def _split_generators(self, dl_manager):
        if not self.config.data_files:
            raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")
        data_files = dl_manager.download_and_extract(self.config.data_files)
        if isinstance(data_files, (str, list, tuple)):
            files = data_files
            if isinstance(files, str):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": files})]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files, str):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file) for file in files]
            splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files}))
        return splits

    def _cast_table(self, pa_table):
        if self.config.features is not None:
            # more expensive cast to support nested features with keys in a different order
            # allows str <-> int/float or str to Audio for example
            pa_table = table_cast(pa_table, self.config.features.arrow_schema)
        return pa_table

    def _generate_tables(self, files):
        for i, file in enumerate(itertools.chain.from_iterable(files)):
            with open(file, "rb") as f:
                pa_table = pa.Table.from_pandas(pd.read_pickle(f))
                yield i, self._cast_table(pa_table)
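# Usage sketch (illustrative; the .pkl paths are placeholders for pickled DataFrames):
#   from datasets import load_dataset
#   ds = load_dataset("pandas", data_files={"train": "train.pkl", "test": "test.pkl"})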
| 300 | 0 |
from typing import Optional
import pyspark
from .. import Features, NamedSplit
from ..download import DownloadMode
from ..packaged_modules.spark.spark import Spark
from .abc import AbstractDatasetReader
class SparkDatasetReader(AbstractDatasetReader):
    def __init__(self, df: pyspark.sql.DataFrame, split: Optional[NamedSplit] = None, features: Optional[Features] = None, streaming: bool = True, cache_dir: str = None, keep_in_memory: bool = False, working_dir: str = None, load_from_cache_file: bool = True, file_format: str = "arrow", **kwargs, ):
        super().__init__(
            split=split, features=features, cache_dir=cache_dir, keep_in_memory=keep_in_memory, streaming=streaming, **kwargs, )
        self._load_from_cache_file = load_from_cache_file
        self._file_format = file_format
        self.builder = Spark(
            df=df, features=features, cache_dir=cache_dir, working_dir=working_dir, **kwargs, )

    def read(self):
        if self.streaming:
            return self.builder.as_streaming_dataset(split=self.split)
        download_mode = None if self._load_from_cache_file else DownloadMode.FORCE_REDOWNLOAD
        self.builder.download_and_prepare(
            download_mode=download_mode, file_format=self._file_format, )
        return self.builder.as_dataset(split=self.split)
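# Usage sketch (illustrative; assumes an active SparkSession):
#   from pyspark.sql import SparkSession
#   spark = SparkSession.builder.master("local[*]").getOrCreate()
#   df = spark.createDataFrame([("a",), ("b",)], ["text"])
#   ds = SparkDatasetReader(df, streaming=False).read()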
| 306 |
import unittest
import numpy as np
from transformers import DistilBertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.distilbert.modeling_flax_distilbert import (
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertModel,
)
class FlaxDistilBertModelTester(unittest.TestCase):
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_attention_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_choices=4, ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])
        config = DistilBertConfig(
            vocab_size=self.vocab_size, dim=self.hidden_size, n_layers=self.num_hidden_layers, n_heads=self.num_attention_heads, hidden_dim=self.intermediate_size, hidden_act=self.hidden_act, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, initializer_range=self.initializer_range, tie_weights_=True, )
        return config, input_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": attention_mask}
        return config, inputs_dict
@require_flax
class FlaxDistilBertModelTest(FlaxModelTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            FlaxDistilBertModel,
            FlaxDistilBertForMaskedLM,
            FlaxDistilBertForMultipleChoice,
            FlaxDistilBertForQuestionAnswering,
            FlaxDistilBertForSequenceClassification,
            FlaxDistilBertForTokenClassification,
            FlaxDistilBertForQuestionAnswering,
        )
        if is_flax_available()
        else ()
    )

    def setUp(self):
        self.model_tester = FlaxDistilBertModelTester(self)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("distilbert-base-uncased")
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
@require_flax
class FlaxDistilBertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head_absolute_embedding(self):
        model = FlaxDistilBertModel.from_pretrained("distilbert-base-uncased")
        input_ids = np.array([[0, 345, 232, 328, 740, 140, 1_695, 69, 6_078, 1_588, 2]])
        attention_mask = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = (1, 11, 768)
        self.assertEqual(output.shape, expected_shape)
        expected_slice = np.array([[[-0.16_39, 0.32_99, 0.16_48], [-0.17_46, 0.32_89, 0.17_10], [-0.18_84, 0.33_57, 0.18_10]]])
        self.assertTrue(jnp.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))
| 306 | 1 |
import inspect
import unittest
from transformers import ConvNextConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import ConvNextBackbone, ConvNextForImageClassification, ConvNextModel
from transformers.models.convnext.modeling_convnext import CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class ConvNextModelTester:
    def __init__(self, parent, batch_size=13, image_size=32, num_channels=3, num_stages=4, hidden_sizes=[10, 20, 30, 40], depths=[2, 2, 3, 2], is_training=True, use_labels=True, intermediate_size=37, hidden_act="gelu", num_labels=10, initializer_range=0.02, out_features=["stage2", "stage3", "stage4"], out_indices=[2, 3, 4], scope=None, ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.num_stages = num_stages
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.out_features = out_features
        self.out_indices = out_indices
        self.scope = scope

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return ConvNextConfig(
            num_channels=self.num_channels, hidden_sizes=self.hidden_sizes, depths=self.depths, num_stages=self.num_stages, hidden_act=self.hidden_act, is_decoder=False, initializer_range=self.initializer_range, out_features=self.out_features, out_indices=self.out_indices, num_labels=self.num_labels, )

    def create_and_check_model(self, config, pixel_values, labels):
        model = ConvNextModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32), )

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        model = ConvNextForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_backbone(self, config, pixel_values, labels):
        model = ConvNextBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # verify hidden states
        self.parent.assertEqual(len(result.feature_maps), len(config.out_features))
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[1], 4, 4])
        # verify channels
        self.parent.assertEqual(len(model.channels), len(config.out_features))
        self.parent.assertListEqual(model.channels, config.hidden_sizes[1:])
        # verify backbone works with out_features=None
        config.out_features = None
        model = ConvNextBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), 1)
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[-1], 1, 1])
        # verify channels
        self.parent.assertEqual(len(model.channels), 1)
        self.parent.assertListEqual(model.channels, [config.hidden_sizes[-1]])

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class ConvNextModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            ConvNextModel,
            ConvNextForImageClassification,
            ConvNextBackbone,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {"feature-extraction": ConvNextModel, "image-classification": ConvNextForImageClassification}
        if is_torch_available()
        else {}
    )
    fx_compatible = True
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = ConvNextModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ConvNextConfig, has_text_modality=False, hidden_size=37)
    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    @unittest.skip(reason="ConvNext does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="ConvNext does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="ConvNext does not use feedforward chunking")
    def test_feed_forward_chunking(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_backbone(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_backbone(*config_and_inputs)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)
            # ConvNext's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]), [self.model_tester.image_size // 4, self.model_tester.image_size // 4], )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ConvNextModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class ConvNextModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return AutoImageProcessor.from_pretrained("facebook/convnext-tiny-224") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        model = ConvNextForImageClassification.from_pretrained("facebook/convnext-tiny-224").to(torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor([-0.02_60, -0.47_39, 0.19_11]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))


@require_torch
class ConvNextBackboneTest(unittest.TestCase, BackboneTesterMixin):
    all_model_classes = (ConvNextBackbone,) if is_torch_available() else ()
    config_class = ConvNextConfig
    has_attentions = False

    def setUp(self):
        self.model_tester = ConvNextModelTester(self)
| 295 |
from collections import defaultdict
from graphs.minimum_spanning_tree_prims import prisms_algorithm as mst
def test_prim_successful_result():
    num_nodes, num_edges = 9, 14  # noqa: F841
    edges = [
        [0, 1, 4],
        [0, 7, 8],
        [1, 2, 8],
        [7, 8, 7],
        [7, 6, 1],
        [2, 8, 2],
        [8, 6, 6],
        [2, 3, 7],
        [2, 5, 4],
        [6, 5, 2],
        [3, 5, 14],
        [3, 4, 9],
        [5, 4, 10],
        [1, 7, 11],
    ]
    adjacency = defaultdict(list)
    for node1, node2, cost in edges:
        adjacency[node1].append([node2, cost])
        adjacency[node2].append([node1, cost])
    result = mst(adjacency)
    expected = [
        [7, 6, 1],
        [2, 8, 2],
        [6, 5, 2],
        [0, 1, 4],
        [2, 5, 4],
        [2, 3, 7],
        [0, 7, 8],
        [3, 4, 9],
    ]
    for answer in expected:
        edge = tuple(answer[:2])
        reverse = tuple(edge[::-1])
        assert edge in result or reverse in result
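# Sanity note: the eight expected edges above span all nine nodes with total weight
# 1 + 2 + 2 + 4 + 4 + 7 + 8 + 9 = 37, the minimum for this classic example graph.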
| 295 | 1 |
import string
def decrypt(message: str) -> None:
    for key in range(len(string.ascii_uppercase)):
        translated = ""
        for symbol in message:
            if symbol in string.ascii_uppercase:
                num = string.ascii_uppercase.find(symbol)
                num = num - key
                if num < 0:
                    num = num + len(string.ascii_uppercase)
                translated = translated + string.ascii_uppercase[num]
            else:
                translated = translated + symbol
        print(f"Decryption using Key #{key}: {translated}")


def main() -> None:
    message = input("Encrypted message: ")
    message = message.upper()
    decrypt(message)
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
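# Example (illustrative): for the ciphertext "KHOOR", the line printed for key #3
# reads "Decryption using Key #3: HELLO".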
| 103 |
"""simple docstring"""
# Lint as: python3
import os
import re
import urllib.parse
from pathlib import Path
from typing import Callable, List, Optional, Union
from zipfile import ZipFile
from ..utils.file_utils import cached_path, hf_github_url
from ..utils.logging import get_logger
from ..utils.version import Version
logger = get_logger(__name__)
class MockDownloadManager:
    dummy_file_name = "dummy_data"
    datasets_scripts_dir = "datasets"
    is_streaming = False

    def __init__(self, dataset_name, config, version, cache_dir=None, use_local_dummy_data=False, load_existing_dummy_data=True, download_callbacks=None, ):
        self.downloaded_size = 0
        self.dataset_name = dataset_name
        self.cache_dir = cache_dir
        self.use_local_dummy_data = use_local_dummy_data
        self.config = config
        # download_callbacks take a single url as input
        self.download_callbacks: List[Callable] = download_callbacks or []
        # if False, it doesn't load existing files and it returns the paths of the dummy files relative
        # to the dummy_data zip file root
        self.load_existing_dummy_data = load_existing_dummy_data
        # TODO(PVP, QL) might need to make this more general
        self.version_name = str(version)
        # to be downloaded
        self._dummy_file = None
        self._bucket_url = None
    @property
    def dummy_file(self):
        if self._dummy_file is None:
            self._dummy_file = self.download_dummy_data()
        return self._dummy_file

    @property
    def dummy_data_folder(self):
        if self.config is not None:
            # structure is dummy / config_name / version_name
            return os.path.join("dummy", self.config.name, self.version_name)
        # structure is dummy / version_name
        return os.path.join("dummy", self.version_name)

    @property
    def dummy_zip_file(self):
        return os.path.join(self.dummy_data_folder, "dummy_data.zip")

    def download_dummy_data(self):
        path_to_dummy_data_dir = (
            self.local_path_to_dummy_data if self.use_local_dummy_data is True else self.github_path_to_dummy_data
        )
        local_path = cached_path(
            path_to_dummy_data_dir, cache_dir=self.cache_dir, extract_compressed_file=True, force_extract=True)
        return os.path.join(local_path, self.dummy_file_name)

    @property
    def local_path_to_dummy_data(self):
        return os.path.join(self.datasets_scripts_dir, self.dataset_name, self.dummy_zip_file)

    @property
    def github_path_to_dummy_data(self):
        if self._bucket_url is None:
            self._bucket_url = hf_github_url(self.dataset_name, self.dummy_zip_file.replace(os.sep, "/"))
        return self._bucket_url

    @property
    def manual_dir(self):
        # return full path if its a dir
        if os.path.isdir(self.dummy_file):
            return self.dummy_file
        # else cut off path to file -> example `xsum`.
        return "/".join(self.dummy_file.replace(os.sep, "/").split("/")[:-1])
    def download_and_extract(self, data_url, *args):
        if self.load_existing_dummy_data:
            # dummy data is downloaded and tested
            dummy_file = self.dummy_file
        else:
            # dummy data cannot be downloaded and only the path to dummy file is returned
            dummy_file = self.dummy_file_name
        # special case when data_url is a dict
        if isinstance(data_url, dict):
            return self.create_dummy_data_dict(dummy_file, data_url)
        elif isinstance(data_url, (list, tuple)):
            return self.create_dummy_data_list(dummy_file, data_url)
        else:
            return self.create_dummy_data_single(dummy_file, data_url)

    def download(self, data_url, *args):
        return self.download_and_extract(data_url)

    def download_custom(self, data_url, custom_download):
        return self.download_and_extract(data_url)

    def extract(self, path, *args, **kwargs):
        return path

    def get_recorded_sizes_checksums(self):
        return {}
    def create_dummy_data_dict(self, path_to_dummy_data, data_url):
        dummy_data_dict = {}
        for key, single_urls in data_url.items():
            for download_callback in self.download_callbacks:
                if isinstance(single_urls, list):
                    for single_url in single_urls:
                        download_callback(single_url)
                else:
                    single_url = single_urls
                    download_callback(single_url)
            # we force the name of each key to be the last file / folder name of the url path
            # if the url has arguments, we need to encode them with urllib.parse.quote_plus
            if isinstance(single_urls, list):
                value = [os.path.join(path_to_dummy_data, urllib.parse.quote_plus(Path(x).name)) for x in single_urls]
            else:
                single_url = single_urls
                value = os.path.join(path_to_dummy_data, urllib.parse.quote_plus(Path(single_url).name))
            dummy_data_dict[key] = value
        # make sure that values are unique
        if all(isinstance(i, str) for i in dummy_data_dict.values()) and len(set(dummy_data_dict.values())) < len(
            dummy_data_dict.values()):
            # append key to value to make its name unique
            dummy_data_dict = {key: value + key for key, value in dummy_data_dict.items()}
        return dummy_data_dict
    def create_dummy_data_list(self, path_to_dummy_data, data_url):
        dummy_data_list = []
        # trick: if there are many shards named like `data.txt-000001-of-00300`, only use the first one
        is_tf_records = all(bool(re.findall("[0-9]{3,}-of-[0-9]{3,}", url)) for url in data_url)
        is_pubmed_records = all(
            url.startswith("https://ftp.ncbi.nlm.nih.gov/pubmed/baseline/pubmed") for url in data_url)
        if data_url and (is_tf_records or is_pubmed_records):
            data_url = [data_url[0]] * len(data_url)
        for single_url in data_url:
            for download_callback in self.download_callbacks:
                download_callback(single_url)
            # we force the name of each key to be the last file / folder name of the url path
            # if the url has arguments, we need to encode them with urllib.parse.quote_plus
            value = os.path.join(path_to_dummy_data, urllib.parse.quote_plus(single_url.split("/")[-1]))
            dummy_data_list.append(value)
        return dummy_data_list
    def create_dummy_data_single(self, path_to_dummy_data, data_url):
        for download_callback in self.download_callbacks:
            download_callback(data_url)
        # we force the name of each key to be the last file / folder name of the url path
        # if the url has arguments, we need to encode them with urllib.parse.quote_plus
        value = os.path.join(path_to_dummy_data, urllib.parse.quote_plus(data_url.split("/")[-1]))
        if os.path.exists(value) or not self.load_existing_dummy_data:
            return value
        else:
            # Backward compatibility, maybe deprecate at one point.
            # For many datasets with single url calls to dl_manager.download_and_extract,
            # the dummy_data.zip file is actually the zipped downloaded file
            # while now we expected the dummy_data.zip file to be a directory containing
            # the downloaded file.
            return path_to_dummy_data

    def delete_extracted_files(self):
        pass

    def manage_extracted_files(self):
        pass
    def iter_archive(self, path):
        def _iter_archive_members(path):
            # this preserves the order of the members inside the ZIP archive
            dummy_parent_path = Path(self.dummy_file).parent
            relative_path = path.relative_to(dummy_parent_path)
            with ZipFile(self.local_path_to_dummy_data) as zip_file:
                members = zip_file.namelist()
            for member in members:
                if member.startswith(relative_path.as_posix()):
                    yield dummy_parent_path.joinpath(member)

        path = Path(path)
        file_paths = _iter_archive_members(path) if self.use_local_dummy_data else path.rglob("*")
        for file_path in file_paths:
            if file_path.is_file() and not file_path.name.startswith((".", "__")):
                yield file_path.relative_to(path).as_posix(), file_path.open("rb")

    def iter_files(self, paths):
        if not isinstance(paths, list):
            paths = [paths]
        for path in paths:
            if os.path.isfile(path):
                if os.path.basename(path).startswith((".", "__")):
                    return
                yield path
            else:
                for dirpath, dirnames, filenames in os.walk(path):
                    if os.path.basename(dirpath).startswith((".", "__")):
                        continue
                    dirnames.sort()
                    for filename in sorted(filenames):
                        if filename.startswith((".", "__")):
                            continue
                        yield os.path.join(dirpath, filename)
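# Usage sketch (hypothetical values; mirrors how dataset tests stub out real downloads):
#   dl_manager = MockDownloadManager("squad", config=None, version="1.0.0")
#   local_paths = dl_manager.download_and_extract({"train": "https://example.com/train.json"})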
| 108 | 0 |
"""simple docstring"""
import csv
from collections import defaultdict
from dataclasses import dataclass, field
from typing import List, Optional
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.ticker import ScalarFormatter
from transformers import HfArgumentParser
def list_field(default=None, metadata=None):
    return field(default_factory=lambda: default, metadata=metadata)
@dataclass
class PlotArguments:
    csv_file: str = field(
        metadata={"help": "The csv file to plot."}, )
    plot_along_batch: bool = field(
        default=False, metadata={"help": "Whether to plot along batch size or sequence length. Defaults to sequence length."}, )
    is_time: bool = field(
        default=False, metadata={"help": "Whether the csv file has time results or memory results. Defaults to memory results."}, )
    no_log_scale: bool = field(
        default=False, metadata={"help": "Disable logarithmic scale when plotting"}, )
    is_train: bool = field(
        default=False, metadata={
            "help": "Whether the csv file has training results or inference results. Defaults to inference results."
        }, )
    figure_png_file: Optional[str] = field(
        default=None, metadata={"help": "Filename under which the plot will be saved. If unused no plot is saved."}, )
    short_model_names: Optional[List[str]] = list_field(
        default=None, metadata={"help": "List of model names that are used instead of the ones in the csv file."})
def can_convert_to_int(string_value):
    try:
        int(string_value)
        return True
    except ValueError:
        return False


def can_convert_to_float(string_value):
    try:
        float(string_value)
        return True
    except ValueError:
        return False
class Plot:
    def __init__(self, args):
        self.args = args
        self.result_dict = defaultdict(lambda: {"bsz": [], "seq_len": [], "result": {}})
        with open(self.args.csv_file, newline="") as csv_file:
            reader = csv.DictReader(csv_file)
            for row in reader:
                model_name = row["model"]
                self.result_dict[model_name]["bsz"].append(int(row["batch_size"]))
                self.result_dict[model_name]["seq_len"].append(int(row["sequence_length"]))
                if can_convert_to_int(row["result"]):
                    # value is not None
                    self.result_dict[model_name]["result"][(int(row["batch_size"]), int(row["sequence_length"]))] = int(
                        row["result"])
                elif can_convert_to_float(row["result"]):
                    # value is not None
                    self.result_dict[model_name]["result"][(int(row["batch_size"]), int(row["sequence_length"]))] = float(
                        row["result"])
    def plot(self):
        fig, ax = plt.subplots()
        title_str = "Time usage" if self.args.is_time else "Memory usage"
        title_str = title_str + " for training" if self.args.is_train else title_str + " for inference"
        if not self.args.no_log_scale:
            # set logarithm scales
            ax.set_xscale("log")
            ax.set_yscale("log")
        for axis in [ax.xaxis, ax.yaxis]:
            axis.set_major_formatter(ScalarFormatter())
        for model_name_idx, model_name in enumerate(self.result_dict.keys()):
            batch_sizes = sorted(set(self.result_dict[model_name]["bsz"]))
            sequence_lengths = sorted(set(self.result_dict[model_name]["seq_len"]))
            results = self.result_dict[model_name]["result"]
            (x_axis_array, inner_loop_array) = (
                (batch_sizes, sequence_lengths) if self.args.plot_along_batch else (sequence_lengths, batch_sizes)
            )
            label_model_name = (
                model_name if self.args.short_model_names is None else self.args.short_model_names[model_name_idx]
            )
            for inner_loop_value in inner_loop_array:
                if self.args.plot_along_batch:
                    y_axis_array = np.asarray(
                        [results[(x, inner_loop_value)] for x in x_axis_array if (x, inner_loop_value) in results], dtype=int, )
                else:
                    y_axis_array = np.asarray(
                        [results[(inner_loop_value, x)] for x in x_axis_array if (inner_loop_value, x) in results], dtype=np.float32, )
                (x_axis_label, inner_loop_label) = (
                    ("batch_size", "len") if self.args.plot_along_batch else ("in #tokens", "bsz")
                )
                x_axis_array = np.asarray(x_axis_array, y_axis_array.dtype)[: len(y_axis_array)]
                plt.scatter(
                    x_axis_array, y_axis_array, label=f"{label_model_name} - {inner_loop_label}: {inner_loop_value}")
                plt.plot(x_axis_array, y_axis_array, "--")
            title_str += f" {label_model_name} vs."
        title_str = title_str[:-4]
        y_axis_label = "Time in s" if self.args.is_time else "Memory in MB"
        # plot
        plt.title(title_str)
        plt.xlabel(x_axis_label)
        plt.ylabel(y_axis_label)
        plt.legend()
        if self.args.figure_png_file is not None:
            plt.savefig(self.args.figure_png_file)
        else:
            plt.show()
def main():
    parser = HfArgumentParser(PlotArguments)
    plot_args = parser.parse_args_into_dataclasses()[0]
    plot = Plot(args=plot_args)
    plot.plot()
if __name__ == "__main__":
main()
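# Example invocation (illustrative; the script filename is an assumption, and the csv is
# expected to have model/batch_size/sequence_length/result columns):
#   python plot_csv_file.py --csv_file inference_results.csv --figure_png_file plot.png --is_time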
| 133 |
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
    DDIMScheduler,
    KandinskyV22Img2ImgPipeline,
    KandinskyV22PriorPipeline,
    UNet2DConditionModel,
    VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class KandinskyV22Img2ImgPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyV22Img2ImgPipeline
    params = ["image_embeds", "negative_image_embeds", "image"]
    batch_params = [
        "image_embeds",
        "negative_image_embeds",
        "image",
    ]
    required_optional_params = [
        "generator",
        "height",
        "width",
        "strength",
        "guidance_scale",
        "num_inference_steps",
        "return_dict",
        "guidance_scale",
        "num_images_per_prompt",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False
    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_0(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100

    @property
    def dummy_unet(self):
        torch.manual_seed(0)
        model_kwargs = {
            "in_channels": 4,
            # Out channels is double in channels because predicts mean and variance
            "out_channels": 8,
            "addition_embed_type": "image",
            "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
            "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
            "mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
            "block_out_channels": (self.block_out_channels_0, self.block_out_channels_0 * 2),
            "layers_per_block": 1,
            "encoder_hid_dim": self.text_embedder_hidden_size,
            "encoder_hid_dim_type": "image_proj",
            "cross_attention_dim": self.cross_attention_dim,
            "attention_head_dim": 4,
            "resnet_time_scale_shift": "scale_shift",
            "class_embed_type": None,
        }
        model = UNet2DConditionModel(**model_kwargs)
        return model

    @property
    def dummy_movq_kwargs(self):
        return {
            "block_out_channels": [32, 64],
            "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
            "in_channels": 3,
            "latent_channels": 4,
            "layers_per_block": 1,
            "norm_num_groups": 8,
            "norm_type": "spatial",
            "num_vq_embeddings": 12,
            "out_channels": 3,
            "up_block_types": [
                "AttnUpDecoderBlock2D",
                "UpDecoderBlock2D",
            ],
            "vq_embed_dim": 4,
        }

    @property
    def dummy_movq(self):
        torch.manual_seed(0)
        model = VQModel(**self.dummy_movq_kwargs)
        return model

    def get_dummy_components(self):
        unet = self.dummy_unet
        movq = self.dummy_movq
        ddim_config = {
            "num_train_timesteps": 1_000,
            "beta_schedule": "linear",
            "beta_start": 0.00085,
            "beta_end": 0.012,
            "clip_sample": False,
            "set_alpha_to_one": False,
            "steps_offset": 0,
            "prediction_type": "epsilon",
            "thresholding": False,
        }
        scheduler = DDIMScheduler(**ddim_config)
        components = {
            "unet": unet,
            "scheduler": scheduler,
            "movq": movq,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed)).to(device)
        negative_image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed + 1)).to(
            device)
        # create init_image
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((256, 256))
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "image": init_image,
            "image_embeds": image_embeds,
            "negative_image_embeds": negative_image_embeds,
            "generator": generator,
            "height": 64,
            "width": 64,
            "num_inference_steps": 10,
            "guidance_scale": 7.0,
            "strength": 0.2,
            "output_type": "np",
        }
        return inputs

    def test_kandinsky_img2img(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        output = pipe(**self.get_dummy_inputs(device))
        image = output.images
        image_from_tuple = pipe(
            **self.get_dummy_inputs(device), return_dict=False, )[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array(
            [0.6199778, 0.63984406, 0.46145785, 0.62944984, 0.5622215, 0.47306132, 0.47441456, 0.4607606, 0.48719263])
        assert (
            np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"
@slow
@require_torch_gpu
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
"""simple docstring"""
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_kandinsky_img2img(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinskyv22/kandinskyv22_img2img_frog.npy")
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png")
        prompt = "A red cartoon frog, 4k"
        pipe_prior = KandinskyV22PriorPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16)
        # the test requires a CUDA device (see @require_torch_gpu)
        pipe_prior.to("cuda")
        pipeline = KandinskyV22Img2ImgPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-decoder", torch_dtype=torch.float16)
        pipeline = pipeline.to("cuda")
        pipeline.set_progress_bar_config(disable=None)
        generator = torch.Generator(device="cpu").manual_seed(0)
        image_embeds, negative_image_embeds = pipe_prior(
            prompt, generator=generator, num_inference_steps=5, negative_prompt="").to_tuple()
        output = pipeline(
            image=init_image, image_embeds=image_embeds, negative_image_embeds=negative_image_embeds, generator=generator, num_inference_steps=100, height=768, width=768, strength=0.2, output_type="np")
        image = output.images[0]
        assert image.shape == (768, 768, 3)
        assert_mean_pixel_difference(image, expected_image)
| 133 | 1 |
'''simple docstring'''
def decimal_to_binary(num: int) -> str:
    """Convert an integer to its binary string representation, e.g. 8 -> "0b1000"."""
    if isinstance(num, float):
        raise TypeError("'float' object cannot be interpreted as an integer")
    if isinstance(num, str):
        raise TypeError("'str' object cannot be interpreted as an integer")
    if num == 0:
        return "0b0"
    negative = False
    if num < 0:
        negative = True
        num = -num
    binary: list[int] = []
    while num > 0:
        binary.insert(0, num % 2)
        num >>= 1
    if negative:
        return "-0b" + "".join(str(e) for e in binary)
    return "0b" + "".join(str(e) for e in binary)
if __name__ == "__main__":
import doctest
doctest.testmod()
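# Quick sanity checks (agree with Python's built-in bin() for these inputs):
#   decimal_to_binary(0)  -> "0b0"
#   decimal_to_binary(8)  -> "0b1000"
#   decimal_to_binary(-3) -> "-0b11"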
| 35 |
'''simple docstring'''
from PIL import Image
def change_brightness(img: Image.Image, level: float) -> Image.Image:
    """Shift the brightness of a PIL image by `level` (positive -> brighter)."""

    def brightness(c: int) -> float:
        # 128 + level + (c - 128) simplifies to c + level; the 128 terms are kept
        # to mirror the usual "pivot around mid-gray" formulation.
        return 128 + level + (c - 128)

    if not -255.0 <= level <= 255.0:
        raise ValueError("level must be between -255.0 (black) and 255.0 (white)")
    return img.point(brightness)


if __name__ == "__main__":
    # Load image
    with Image.open("image_data/lena.jpg") as img:
        # Change brightness to 100
        bright_img = change_brightness(img, 100)
        bright_img.save("image_data/lena_brightness.png", format="png")
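# For comparison, Pillow ships a higher-level enhancer that *scales* rather than
# shifts pixel values; a minimal sketch (ImageEnhance is part of Pillow itself):
#
#   from PIL import ImageEnhance
#   brighter = ImageEnhance.Brightness(img).enhance(1.5)  # 1.0 keeps the image unchanged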
| 35 | 1 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'YituTech/conv-bert-base': 'https://huggingface.co/YituTech/conv-bert-base/resolve/main/config.json',
'YituTech/conv-bert-medium-small': (
'https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/config.json'
),
'YituTech/conv-bert-small': 'https://huggingface.co/YituTech/conv-bert-small/resolve/main/config.json',
# See all ConvBERT models at https://huggingface.co/models?filter=convbert
}
class ConvBertConfig(PretrainedConfig):
    model_type = 'convbert'

    def __init__(
        self,
        vocab_size=30_522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act='gelu',
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        embedding_size=768,
        head_ratio=2,
        conv_kernel_size=9,
        num_groups=1,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(
            pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs,
        )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.embedding_size = embedding_size
        self.head_ratio = head_ratio
        self.conv_kernel_size = conv_kernel_size
        self.num_groups = num_groups
        self.classifier_dropout = classifier_dropout


class ConvBertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == 'multiple-choice':
            dynamic_axis = {0: 'batch', 1: 'choice', 2: 'sequence'}
        else:
            dynamic_axis = {0: 'batch', 1: 'sequence'}
        return OrderedDict(
            [
                ('input_ids', dynamic_axis),
                ('attention_mask', dynamic_axis),
                ('token_type_ids', dynamic_axis),
            ]
        )
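# A minimal usage sketch, assuming this module is exposed through transformers'
# public API as ConvBertConfig (which is how the upstream library ships it):
#
#   config = ConvBertConfig(num_hidden_layers=6, conv_kernel_size=9)
#   config.save_pretrained("./my-convbert")              # writes config.json
#   reloaded = ConvBertConfig.from_pretrained("./my-convbert")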
| 238 |
def solution(power: int = 1000) -> int:
    """Return the sum of the digits of 2**power (Project Euler problem 16)."""
    num = 2**power
    string_num = str(num)
    list_num = list(string_num)
    sum_of_num = 0
    for i in list_num:
        sum_of_num += int(i)
    return sum_of_num


if __name__ == "__main__":
    power = int(input('Enter the power of 2: ').strip())
    print('2 ^ ', power, ' = ', 2**power)
    result = solution(power)
    print('Sum of the digits is: ', result)
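# Quick check: solution(15) == 26, since 2**15 = 32768 and 3 + 2 + 7 + 6 + 8 = 26.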
| 238 | 1 |
"""simple docstring"""
from typing import Callable, List, Optional, Union
import PIL
import torch
from transformers import (
CLIPImageProcessor,
CLIPSegForImageSegmentation,
CLIPSegProcessor,
CLIPTextModel,
CLIPTokenizer,
)
from diffusers import DiffusionPipeline
from diffusers.configuration_utils import FrozenDict
from diffusers.models import AutoencoderKL, UNet2DConditionModel
from diffusers.pipelines.stable_diffusion import StableDiffusionInpaintPipeline
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
from diffusers.utils import deprecate, is_accelerate_available, logging
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
class TextInpainting(DiffusionPipeline):
    """Text-guided inpainting: CLIPSeg segments the region described by `text`,
    then a Stable Diffusion inpainting pass fills that region with `prompt`."""

    def __init__(
        self,
        segmentation_model: CLIPSegForImageSegmentation,
        segmentation_processor: CLIPSegProcessor,
        vae: AutoencoderKL,
        text_encoder: CLIPTextModel,
        tokenizer: CLIPTokenizer,
        unet: UNet2DConditionModel,
        scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler],
        safety_checker: StableDiffusionSafetyChecker,
        feature_extractor: CLIPImageProcessor,
    ):
        super().__init__()
        if hasattr(scheduler.config, "steps_offset") and scheduler.config.steps_offset != 1:
            deprecation_message = (
                f"The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`"
                f" should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure "
                "to update the config accordingly as leaving `steps_offset` might lead to incorrect results"
                " in future versions. If you have downloaded this checkpoint from the Hugging Face Hub,"
                " it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`"
                " file"
            )
            deprecate("steps_offset!=1", "1.0.0", deprecation_message, standard_warn=False)
            new_config = dict(scheduler.config)
            new_config["steps_offset"] = 1
            scheduler._internal_dict = FrozenDict(new_config)

        if hasattr(scheduler.config, "skip_prk_steps") and scheduler.config.skip_prk_steps is False:
            deprecation_message = (
                f"The configuration file of this scheduler: {scheduler} has not set the configuration"
                " `skip_prk_steps`. `skip_prk_steps` should be set to True in the configuration file. Please make"
                " sure to update the config accordingly as not setting `skip_prk_steps` in the config might lead to"
                " incorrect results in future versions. If you have downloaded this checkpoint from the Hugging Face"
                " Hub, it would be very nice if you could open a Pull request for the"
                " `scheduler/scheduler_config.json` file"
            )
            deprecate("skip_prk_steps not set", "1.0.0", deprecation_message, standard_warn=False)
            new_config = dict(scheduler.config)
            new_config["skip_prk_steps"] = True
            scheduler._internal_dict = FrozenDict(new_config)
if safety_checker is None:
logger.warning(
F"""You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"""
" that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered"
" results in services or applications open to the public. Both the diffusers team and Hugging Face"
" strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"
" it only for use-cases that involve analyzing network behavior or auditing its results. For more"
" information, please have a look at https://github.com/huggingface/diffusers/pull/254 ." )
        self.register_modules(
            segmentation_model=segmentation_model,
            segmentation_processor=segmentation_processor,
            vae=vae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            unet=unet,
            scheduler=scheduler,
            safety_checker=safety_checker,
            feature_extractor=feature_extractor,
        )
    def enable_attention_slicing(self, slice_size: Optional[Union[str, int]] = "auto"):
        if slice_size == "auto":
            # half the attention head size is usually a good trade-off between
            # speed and memory
            slice_size = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(slice_size)

    def disable_attention_slicing(self):
        self.enable_attention_slicing(None)
    def enable_sequential_cpu_offload(self):
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("Please install accelerate via `pip install accelerate`")

        device = torch.device("cuda")

        for cpu_offloaded_model in [self.unet, self.text_encoder, self.vae, self.safety_checker]:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model, device)
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def _execution_device(self):
        if self.device != torch.device("meta") or not hasattr(self.unet, "_hf_hook"):
            return self.device
        for module in self.unet.modules():
            if (
                hasattr(module, "_hf_hook")
                and hasattr(module._hf_hook, "execution_device")
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device)
        return self.device
    @torch.no_grad()
    def __call__(
        self,
        prompt: Union[str, List[str]],
        image: Union[torch.FloatTensor, PIL.Image.Image],
        text: str,
        height: int = 512,
        width: int = 512,
        num_inference_steps: int = 50,
        guidance_scale: float = 7.5,
        negative_prompt: Optional[Union[str, List[str]]] = None,
        num_images_per_prompt: Optional[int] = 1,
        eta: float = 0.0,
        generator: Optional[torch.Generator] = None,
        latents: Optional[torch.FloatTensor] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
        callback_steps: int = 1,
        **kwargs,
    ):
        # segment the region named by `text`
        inputs = self.segmentation_processor(
            text=[text], images=[image], padding="max_length", return_tensors="pt").to(self.device)
        outputs = self.segmentation_model(**inputs)
        mask = torch.sigmoid(outputs.logits).cpu().detach().unsqueeze(-1).numpy()
        mask_pil = self.numpy_to_pil(mask)[0].resize(image.size)

        # Run inpainting pipeline with the generated mask
        inpainting_pipeline = StableDiffusionInpaintPipeline(
            vae=self.vae,
            text_encoder=self.text_encoder,
            tokenizer=self.tokenizer,
            unet=self.unet,
            scheduler=self.scheduler,
            safety_checker=self.safety_checker,
            feature_extractor=self.feature_extractor,
        )
        return inpainting_pipeline(
            prompt=prompt,
            image=image,
            mask_image=mask_pil,
            height=height,
            width=width,
            num_inference_steps=num_inference_steps,
            guidance_scale=guidance_scale,
            negative_prompt=negative_prompt,
            num_images_per_prompt=num_images_per_prompt,
            eta=eta,
            generator=generator,
            latents=latents,
            output_type=output_type,
            return_dict=return_dict,
            callback=callback,
            callback_steps=callback_steps,
        )
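# A rough usage sketch (hypothetical model ids; community pipelines like this one
# are normally loaded via DiffusionPipeline.from_pretrained with custom_pipeline):
#
#   from transformers import CLIPSegProcessor, CLIPSegForImageSegmentation
#   processor = CLIPSegProcessor.from_pretrained("CIDAS/clipseg-rd64-refined")
#   model = CLIPSegForImageSegmentation.from_pretrained("CIDAS/clipseg-rd64-refined")
#   pipe = DiffusionPipeline.from_pretrained(
#       "runwayml/stable-diffusion-inpainting",
#       custom_pipeline="text_inpainting",
#       segmentation_model=model,
#       segmentation_processor=processor,
#   )
#   result = pipe(prompt="a glass cup", image=photo, text="the plastic cup").images[0]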
| 54 |
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roberta import RobertaTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_file': 'tokenizer.json'}

PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''roberta-base''': '''https://huggingface.co/roberta-base/resolve/main/vocab.json''',
'''roberta-large''': '''https://huggingface.co/roberta-large/resolve/main/vocab.json''',
'''roberta-large-mnli''': '''https://huggingface.co/roberta-large-mnli/resolve/main/vocab.json''',
'''distilroberta-base''': '''https://huggingface.co/distilroberta-base/resolve/main/vocab.json''',
'''roberta-base-openai-detector''': '''https://huggingface.co/roberta-base-openai-detector/resolve/main/vocab.json''',
'''roberta-large-openai-detector''': (
'''https://huggingface.co/roberta-large-openai-detector/resolve/main/vocab.json'''
),
},
'''merges_file''': {
'''roberta-base''': '''https://huggingface.co/roberta-base/resolve/main/merges.txt''',
'''roberta-large''': '''https://huggingface.co/roberta-large/resolve/main/merges.txt''',
'''roberta-large-mnli''': '''https://huggingface.co/roberta-large-mnli/resolve/main/merges.txt''',
'''distilroberta-base''': '''https://huggingface.co/distilroberta-base/resolve/main/merges.txt''',
'''roberta-base-openai-detector''': '''https://huggingface.co/roberta-base-openai-detector/resolve/main/merges.txt''',
'''roberta-large-openai-detector''': (
'''https://huggingface.co/roberta-large-openai-detector/resolve/main/merges.txt'''
),
},
'''tokenizer_file''': {
'''roberta-base''': '''https://huggingface.co/roberta-base/resolve/main/tokenizer.json''',
'''roberta-large''': '''https://huggingface.co/roberta-large/resolve/main/tokenizer.json''',
'''roberta-large-mnli''': '''https://huggingface.co/roberta-large-mnli/resolve/main/tokenizer.json''',
'''distilroberta-base''': '''https://huggingface.co/distilroberta-base/resolve/main/tokenizer.json''',
'''roberta-base-openai-detector''': (
'''https://huggingface.co/roberta-base-openai-detector/resolve/main/tokenizer.json'''
),
'''roberta-large-openai-detector''': (
'''https://huggingface.co/roberta-large-openai-detector/resolve/main/tokenizer.json'''
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''roberta-base''': 512,
'''roberta-large''': 512,
'''roberta-large-mnli''': 512,
'''distilroberta-base''': 512,
'''roberta-base-openai-detector''': 512,
'''roberta-large-openai-detector''': 512,
}
class RobertaTokenizerFast(PreTrainedTokenizerFast):
    """Fast (tokenizers-backed) RoBERTa tokenizer."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['input_ids', 'attention_mask']
    slow_tokenizer_class = RobertaTokenizer

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        trim_offsets=True,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            trim_offsets=trim_offsets,
            **kwargs,
        )
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)
        self.add_prefix_space = add_prefix_space

        tokenizer_component = "post_processor"
        tokenizer_component_instance = getattr(self.backend_tokenizer, tokenizer_component, None)
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__())
            # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
            if "sep" in state:
                state["sep"] = tuple(state["sep"])
            if "cls" in state:
                state["cls"] = tuple(state["cls"])
            changes_to_apply = False
            if state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
                state["add_prefix_space"] = add_prefix_space
                changes_to_apply = True
            if state.get("trim_offsets", trim_offsets) != trim_offsets:
                state["trim_offsets"] = trim_offsets
                changes_to_apply = True
            if changes_to_apply:
                component_class = getattr(processors, state.pop("type"))
                new_value = component_class(**state)
                setattr(self.backend_tokenizer, tokenizer_component, new_value)
    @property
    def mask_token(self) -> str:
        if self._mask_token is None:
            if self.verbose:
                logger.error("Using mask_token, but it is not set yet.")
            return None
        return str(self._mask_token)

    @mask_token.setter
    def mask_token(self, value):
        # The mask token behaves like a normal word, i.e. includes the space before it
        value = AddedToken(value, lstrip=True, rstrip=False) if isinstance(value, str) else value
        self._mask_token = value
    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output
        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
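# RoBERTa formats a single sequence as `<s> A </s>` and a pair as
# `<s> A </s></s> B </s>`; build_inputs_with_special_tokens above produces exactly
# that, and token_type_ids are all zeros because RoBERTa never uses segment ids.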
| 300 | 0 |
def interpolation_search(sorted_collection: list, item: int):
    """Search `item` in an ascending `sorted_collection`; return its index or None."""
    left = 0
    right = len(sorted_collection) - 1
    while left <= right:
        # avoid divided by 0 during interpolation
        if sorted_collection[left] == sorted_collection[right]:
            if sorted_collection[left] == item:
                return left
            else:
                return None
        point = left + ((item - sorted_collection[left]) * (right - left)) // (
            sorted_collection[right] - sorted_collection[left]
        )
        # out of range check
        if point < 0 or point >= len(sorted_collection):
            return None
        current_item = sorted_collection[point]
        if current_item == item:
            return point
        else:
            if point < left:
                right = left
                left = point
            elif point > right:
                left = right
                right = point
            else:
                if item < current_item:
                    right = point - 1
                else:
                    left = point + 1
    return None
def interpolation_search_by_recursion(sorted_collection, item, left, right):
    """Recursive variant; `left` and `right` carry the current search bounds."""
    # avoid divided by 0 during interpolation
    if sorted_collection[left] == sorted_collection[right]:
        if sorted_collection[left] == item:
            return left
        else:
            return None
    point = left + ((item - sorted_collection[left]) * (right - left)) // (
        sorted_collection[right] - sorted_collection[left]
    )
    # out of range check
    if point < 0 or point >= len(sorted_collection):
        return None
    if sorted_collection[point] == item:
        return point
    elif point < left:
        return interpolation_search_by_recursion(sorted_collection, item, point, left)
    elif point > right:
        return interpolation_search_by_recursion(sorted_collection, item, right, left)
    else:
        if sorted_collection[point] > item:
            return interpolation_search_by_recursion(
                sorted_collection, item, left, point - 1)
        else:
            return interpolation_search_by_recursion(
                sorted_collection, item, point + 1, right)
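# Why interpolation search can beat binary search on uniform data: instead of
# probing the middle element, the probe index linearly interpolates where `item`
# should sit between the boundary values,
#
#   point = left + (item - a[left]) * (right - left) // (a[right] - a[left])
#
# giving O(log log n) expected probes on uniformly distributed keys, but
# degrading toward O(n) on skewed distributions (binary search stays O(log n)).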
def __assert_sorted(collection):
    """Raise ValueError unless `collection` is ascending sorted."""
    if collection != sorted(collection):
        raise ValueError('Collection must be ascending sorted')
    return True


if __name__ == "__main__":
    import sys

    debug = 0
    # collection is defined unconditionally so the search below always has input,
    # even when the debug assertion is skipped
    collection = [10, 30, 40, 45, 50, 66, 77, 93]
    if debug == 1:
        try:
            __assert_sorted(collection)
        except ValueError:
            sys.exit('Sequence must be ascending sorted to apply interpolation search')
    target = 67
    result = interpolation_search(collection, target)
    if result is not None:
        print(f'{target} found at positions: {result}')
    else:
        print('Not found')
| 210 |
def remove_duplicates(key: str) -> str:
    """Keep only the first occurrence of each letter in `key` (spaces allowed)."""
    key_no_dups = ''
    for ch in key:
        if ch == " " or ch not in key_no_dups and ch.isalpha():
            key_no_dups += ch
    return key_no_dups


def create_cipher_map(key: str) -> dict[str, str]:
    """Build a substitution alphabet that starts with `key`'s unique letters."""
    alphabet = [chr(i + 65) for i in range(26)]
    # Remove duplicate characters from key
    key = remove_duplicates(key.upper())
    offset = len(key)
    # First fill cipher with key characters
    cipher_alphabet = {alphabet[i]: char for i, char in enumerate(key)}
    # Then map remaining characters in alphabet to
    # the alphabet from the beginning
    for i in range(len(key), 26):
        char = alphabet[i - offset]
        # Ensure we are not mapping letters to letters previously mapped
        while char in key:
            offset -= 1
            char = alphabet[i - offset]
        cipher_alphabet[alphabet[i]] = char
    return cipher_alphabet


def encipher(message: str, cipher_map: dict[str, str]) -> str:
    return "".join(cipher_map.get(ch, ch) for ch in message.upper())


def decipher(message: str, cipher_map: dict[str, str]) -> str:
    # Reverse our cipher mappings
    rev_cipher_map = {v: k for k, v in cipher_map.items()}
    return "".join(rev_cipher_map.get(ch, ch) for ch in message.upper())


def main() -> None:
    message = input('Enter message to encode or decode: ').strip()
    key = input('Enter keyword: ').strip()
    option = input('Encipher or decipher? E/D:').strip()[0].lower()
    try:
        func = {'e': encipher, 'd': decipher}[option]
    except KeyError:
        raise KeyError('invalid input option')
    cipher_map = create_cipher_map(key)
    print(func(message, cipher_map))
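# Round-trip property: for any keyword K and message M,
#   decipher(encipher(M, create_cipher_map(K)), create_cipher_map(K)) == M.upper()
# because decipher simply inverts the substitution dictionary and non-alphabetic
# characters pass through both functions unchanged.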
if __name__ == "__main__":
import doctest
doctest.testmod()
    main()
| 210 | 1 |
import collections
from typing import List, Optional, Union
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert_fast import BertTokenizerFast
from .tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer, DPRReaderTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}

CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''facebook/dpr-ctx_encoder-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt'''
),
'''facebook/dpr-ctx_encoder-multiset-base''': (
'''https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''facebook/dpr-ctx_encoder-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json'''
),
'''facebook/dpr-ctx_encoder-multiset-base''': (
'''https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json'''
),
},
}
QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''facebook/dpr-question_encoder-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt'''
),
'''facebook/dpr-question_encoder-multiset-base''': (
'''https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''facebook/dpr-question_encoder-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json'''
),
'''facebook/dpr-question_encoder-multiset-base''': (
'''https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json'''
),
},
}
READER_PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''facebook/dpr-reader-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt'''
),
'''facebook/dpr-reader-multiset-base''': (
'''https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''facebook/dpr-reader-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json'''
),
'''facebook/dpr-reader-multiset-base''': (
'''https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json'''
),
},
}
CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''facebook/dpr-ctx_encoder-single-nq-base''': 5_1_2,
'''facebook/dpr-ctx_encoder-multiset-base''': 5_1_2,
}
QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''facebook/dpr-question_encoder-single-nq-base''': 5_1_2,
'''facebook/dpr-question_encoder-multiset-base''': 5_1_2,
}
READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''facebook/dpr-reader-single-nq-base''': 5_1_2,
'''facebook/dpr-reader-multiset-base''': 5_1_2,
}
CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
'''facebook/dpr-ctx_encoder-single-nq-base''': {'''do_lower_case''': True},
'''facebook/dpr-ctx_encoder-multiset-base''': {'''do_lower_case''': True},
}
QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
'''facebook/dpr-question_encoder-single-nq-base''': {'''do_lower_case''': True},
'''facebook/dpr-question_encoder-multiset-base''': {'''do_lower_case''': True},
}
READER_PRETRAINED_INIT_CONFIGURATION = {
'''facebook/dpr-reader-single-nq-base''': {'''do_lower_case''': True},
'''facebook/dpr-reader-multiset-base''': {'''do_lower_case''': True},
}
class DPRContextEncoderTokenizerFast(BertTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = DPRContextEncoderTokenizer


class DPRQuestionEncoderTokenizerFast(BertTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = DPRQuestionEncoderTokenizer
DPRSpanPrediction = collections.namedtuple(
    'DPRSpanPrediction', ['span_score', 'relevance_score', 'doc_id', 'start_index', 'end_index', 'text']
)

DPRReaderOutput = collections.namedtuple('DPRReaderOutput', ['start_logits', 'end_logits', 'relevance_logits'])

CUSTOM_DPR_READER_DOCSTRING = R'''
Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.
It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),
using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`
with the format:
[CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>
Args:
questions (`str` or `List[str]`):
The questions to be encoded. You can specify one question for many passages. In this case, the question
will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in
`titles` or `texts`.
titles (`str` or `List[str]`):
The passages titles to be encoded. This can be a string or a list of strings if there are several passages.
texts (`str` or `List[str]`):
The passages texts to be encoded. This can be a string or a list of strings if there are several passages.
padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):
Activates and controls padding. Accepts the following values:
- `True` or `\'longest\'`: Pad to the longest sequence in the batch (or no padding if only a single sequence
if provided).
- `\'max_length\'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided.
- `False` or `\'do_not_pad\'` (default): No padding (i.e., can output a batch with sequences of different
lengths).
truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):
Activates and controls truncation. Accepts the following values:
- `True` or `\'longest_first\'`: Truncate to a maximum length specified with the argument `max_length` or to
the maximum acceptable input length for the model if that argument is not provided. This will truncate
token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch
of pairs) is provided.
- `\'only_first\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided. This will only truncate the first
sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
- `\'only_second\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided. This will only truncate the
second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
- `False` or `\'do_not_truncate\'` (default): No truncation (i.e., can output batch with sequence lengths
greater than the model maximum admissible input size).
max_length (`int`, *optional*):
Controls the maximum length to use by one of the truncation/padding parameters.
If left unset or set to `None`, this will use the predefined model maximum length if a maximum length
is required by one of the truncation/padding parameters. If the model has no specific maximum input
length (like XLNet) truncation/padding to a maximum length will be deactivated.
return_tensors (`str` or [`~utils.TensorType`], *optional*):
If set, will return tensors instead of list of python integers. Acceptable values are:
- `\'tf\'`: Return TensorFlow `tf.constant` objects.
- `\'pt\'`: Return PyTorch `torch.Tensor` objects.
- `\'np\'`: Return Numpy `np.ndarray` objects.
return_attention_mask (`bool`, *optional*):
Whether or not to return the attention mask. If not set, will return the attention mask according to the
specific tokenizer\'s default, defined by the `return_outputs` attribute.
[What are attention masks?](../glossary#attention-mask)
Return:
`Dict[str, List[List[int]]]`: A dictionary with the following keys:
- `input_ids`: List of token ids to be fed to a model.
- `attention_mask`: List of indices specifying which tokens should be attended to by the model.
'''
@add_start_docstrings(CUSTOM_DPR_READER_DOCSTRING)
class CustomDPRReaderTokenizerMixin:
    def __call__(
        self,
        questions,
        titles: Optional[str] = None,
        texts: Optional[str] = None,
        padding: Union[bool, str] = False,
        truncation: Union[bool, str] = False,
        max_length: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        return_attention_mask: Optional[bool] = None,
        **kwargs,
    ) -> BatchEncoding:
        if titles is None and texts is None:
            return super().__call__(
                questions, padding=padding, truncation=truncation, max_length=max_length,
                return_tensors=return_tensors, return_attention_mask=return_attention_mask, **kwargs,
            )
        elif titles is None or texts is None:
            text_pair = titles if texts is None else texts
            return super().__call__(
                questions, text_pair, padding=padding, truncation=truncation, max_length=max_length,
                return_tensors=return_tensors, return_attention_mask=return_attention_mask, **kwargs,
            )
        titles = titles if not isinstance(titles, str) else [titles]
        texts = texts if not isinstance(texts, str) else [texts]
        n_passages = len(titles)
        questions = questions if not isinstance(questions, str) else [questions] * n_passages
        assert len(titles) == len(
            texts
        ), f'There should be as many titles than texts but got {len(titles)} titles and {len(texts)} texts.'
        encoded_question_and_titles = super().__call__(questions, titles, padding=False, truncation=False)['input_ids']
        encoded_texts = super().__call__(texts, add_special_tokens=False, padding=False, truncation=False)['input_ids']
        encoded_inputs = {
            'input_ids': [
                (encoded_question_and_title + encoded_text)[:max_length]
                if max_length is not None and truncation
                else encoded_question_and_title + encoded_text
                for encoded_question_and_title, encoded_text in zip(encoded_question_and_titles, encoded_texts)
            ]
        }
        if return_attention_mask is not False:
            attention_mask = []
            for input_ids in encoded_inputs["input_ids"]:
                attention_mask.append([int(input_id != self.pad_token_id) for input_id in input_ids])
            encoded_inputs["attention_mask"] = attention_mask
        return self.pad(encoded_inputs, padding=padding, max_length=max_length, return_tensors=return_tensors)
    def decode_best_spans(
        self,
        reader_input: BatchEncoding,
        reader_output,
        num_spans: int = 16,
        max_answer_length: int = 64,
        num_spans_per_passage: int = 4,
    ):
        input_ids = reader_input['input_ids']
        start_logits, end_logits, relevance_logits = reader_output[:3]
        n_passages = len(relevance_logits)
        sorted_docs = sorted(range(n_passages), reverse=True, key=relevance_logits.__getitem__)
        nbest_spans_predictions = []
        for doc_id in sorted_docs:
            sequence_ids = list(input_ids[doc_id])
            # assuming question & title information is at the beginning of the sequence
            passage_offset = sequence_ids.index(self.sep_token_id, 2) + 1  # second sep id
            if sequence_ids[-1] == self.pad_token_id:
                sequence_len = sequence_ids.index(self.pad_token_id)
            else:
                sequence_len = len(sequence_ids)
            best_spans = self._get_best_spans(
                start_logits=start_logits[doc_id][passage_offset:sequence_len],
                end_logits=end_logits[doc_id][passage_offset:sequence_len],
                max_answer_length=max_answer_length,
                top_spans=num_spans_per_passage,
            )
            for start_index, end_index in best_spans:
                start_index += passage_offset
                end_index += passage_offset
                nbest_spans_predictions.append(
                    DPRSpanPrediction(
                        span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index],
                        relevance_score=relevance_logits[doc_id],
                        doc_id=doc_id,
                        start_index=start_index,
                        end_index=end_index,
                        text=self.decode(sequence_ids[start_index : end_index + 1]),
                    ))
            if len(nbest_spans_predictions) >= num_spans:
                break
        return nbest_spans_predictions[:num_spans]
    def _get_best_spans(
        self,
        start_logits,
        end_logits,
        max_answer_length: int,
        top_spans: int,
    ):
        scores = []
        for start_index, start_score in enumerate(start_logits):
            for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length]):
                scores.append(((start_index, start_index + answer_length), start_score + end_score))
        scores = sorted(scores, key=lambda x: x[1], reverse=True)
        chosen_span_intervals = []
        for (start_index, end_index), score in scores:
            assert start_index <= end_index, f'Wrong span indices: [{start_index}:{end_index}]'
            length = end_index - start_index + 1
            assert length <= max_answer_length, f'Span is too long: {length} > {max_answer_length}'
            if any(
                start_index <= prev_start_index <= prev_end_index <= end_index
                or prev_start_index <= start_index <= end_index <= prev_end_index
                for (prev_start_index, prev_end_index) in chosen_span_intervals):
                continue
            chosen_span_intervals.append((start_index, end_index))
            if len(chosen_span_intervals) == top_spans:
                break
        return chosen_span_intervals
@add_end_docstrings(CUSTOM_DPR_READER_DOCSTRING)
class DPRReaderTokenizerFast(CustomDPRReaderTokenizerMixin, BertTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = READER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = READER_PRETRAINED_INIT_CONFIGURATION
    model_input_names = ['input_ids', 'attention_mask']
    slow_tokenizer_class = DPRReaderTokenizer
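# A rough end-to-end sketch of the reader flow (DPRReader is the companion model
# class in transformers; the checkpoint names are the ones referenced above):
#
#   tokenizer = DPRReaderTokenizerFast.from_pretrained("facebook/dpr-reader-single-nq-base")
#   encoded = tokenizer(questions="What is love?",
#                       titles="Haddaway",
#                       texts="'What Is Love' is a song recorded by Haddaway",
#                       return_tensors="pt")
#   outputs = model(**encoded)            # model = DPRReader.from_pretrained(...)
#   best_spans = tokenizer.decode_best_spans(encoded, outputs)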
| 295 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
SPIECE_UNDERLINE = '▁'

VOCAB_FILES_NAMES = {'vocab_file': 'spiece.model'}

PRETRAINED_VOCAB_FILES_MAP = {
    'vocab_file': {'google/pegasus-xsum': 'https://huggingface.co/google/pegasus-xsum/resolve/main/spiece.model'}
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    'google/pegasus-xsum': 512,
}

logger = logging.get_logger(__name__)
class PegasusTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['input_ids', 'attention_mask']
    def __init__(
        self,
        vocab_file,
        pad_token="<pad>",
        eos_token="</s>",
        unk_token="<unk>",
        mask_token="<mask_2>",
        mask_token_sent="<mask_1>",
        additional_special_tokens=None,
        offset=103,
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        self.offset = offset
        if additional_special_tokens is not None:
            if not isinstance(additional_special_tokens, list):
                raise TypeError(
                    f'additional_special_tokens should be of type {type(list)}, but is'
                    f' {type(additional_special_tokens)}')
            additional_special_tokens_extended = (
                ([mask_token_sent] + additional_special_tokens)
                if mask_token_sent not in additional_special_tokens and mask_token_sent is not None
                else additional_special_tokens
            )
            # fill additional tokens with ..., <unk_token_102> in case not all additional tokens are already taken
            additional_special_tokens_extended += [
                f'<unk_{i}>' for i in range(len(additional_special_tokens_extended), self.offset - 1)
            ]
            if len(set(additional_special_tokens_extended)) != len(additional_special_tokens_extended):
                raise ValueError(
                    'Please make sure that the provided additional_special_tokens do not contain an incorrectly'
                    f' shifted list of <unk_x> tokens. Found {additional_special_tokens_extended}.')
            additional_special_tokens = additional_special_tokens_extended
        else:
            additional_special_tokens = [mask_token_sent] if mask_token_sent is not None else []
            additional_special_tokens += [f'<unk_{i}>' for i in range(2, self.offset)]
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            eos_token=eos_token,
            unk_token=unk_token,
            mask_token=mask_token,
            pad_token=pad_token,
            mask_token_sent=mask_token_sent,
            offset=offset,
            additional_special_tokens=additional_special_tokens,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )
        self.mask_token_sent = mask_token_sent
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)
        # add special tokens to encoder dict
        self.encoder: Dict[int, str] = {
            0: self.pad_token,
            1: self.eos_token,
        }
        if self.mask_token_sent is not None:
            self.encoder.update(
                {
                    2: self.mask_token_sent,
                    3: self.mask_token,
                })
        if self.offset > 0:
            # entries 2-104 are only used for pretraining and called <mask_1>, <mask_2>, unk_2, ...unk_102
            # mask_token_sent is already added to list -> so start at 1
            self.encoder.update({i + 3: additional_special_tokens[i] for i in range(1, self.offset - 1)})
        self.decoder: Dict[str, int] = {v: k for k, v in self.encoder.items()}
    @property
    def vocab_size(self) -> int:
        return len(self.sp_model) + self.offset

    def get_vocab(self) -> Dict[str, int]:
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        state = self.__dict__.copy()
        state['sp_model'] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, 'sp_model_kwargs'):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token: str) -> int:
        if token in self.decoder:
            return self.decoder[token]
        elif token in self.added_tokens_decoder:
            return self.added_tokens_decoder[token]
        sp_id = self.sp_model.piece_to_id(token)
        return sp_id + self.offset

    def _convert_id_to_token(self, index: int) -> str:
        if index in self.encoder:
            return self.encoder[index]
        elif index in self.added_tokens_encoder:
            return self.added_tokens_encoder[index]
        else:
            token = self.sp_model.IdToPiece(index - self.offset)
        return token

    def convert_tokens_to_string(self, tokens) -> str:
        current_sub_tokens = []
        out_string = ''
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(current_sub_tokens) + token
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def num_special_tokens_to_add(self, pair=False):
        """Just EOS"""
        return 1

    def _special_token_mask(self, seq: List[int]) -> List[int]:
        all_special_ids = set(self.all_special_ids)  # call it once instead of inside list comp
        all_special_ids.remove(self.unk_token_id)  # <unk> is only sometimes special
        return [1 if x in all_special_ids else 0 for x in seq]

    def get_special_tokens_mask(
        self, token_ids_0: List, token_ids_1: Optional[List] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return self._special_token_mask(token_ids_0)
        elif token_ids_1 is None:
            return self._special_token_mask(token_ids_0) + [1]
        else:
            return self._special_token_mask(token_ids_0 + token_ids_1) + [1]

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        if token_ids_1 is None:
            return token_ids_0 + [self.eos_token_id]
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return token_ids_0 + token_ids_1 + [self.eos_token_id]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f'Vocabulary path ({save_directory}) should be a directory')
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, 'wb') as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (out_vocab_file,)
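# Note on the id layout: Pegasus reserves ids 0..offset-1 (pad, eos, the two mask
# tokens and <unk_2>..<unk_102>) in front of the sentencepiece vocabulary, which is
# why piece ids are shifted by `self.offset` in _convert_token_to_id and
# _convert_id_to_token above.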
| 295 | 1 |
import sys
from typing import Tuple
import numpy as np
import torch
from PIL import Image
from torch import nn
from transformers.image_utils import PILImageResampling
from utils import img_tensorize
class ResizeShortestEdge:
    def __init__(self, short_edge_length, max_size=sys.maxsize):
        """Resize so the shortest edge lands in `short_edge_length`, capping the longest edge at `max_size`."""
        self.interp_method = 'bilinear'
        self.max_size = max_size
        self.short_edge_length = short_edge_length
    def __call__(self, imgs):
        img_augs = []
        for img in imgs:
            h, w = img.shape[:2]
            # later: provide list and randomly choose index for resize
            size = np.random.randint(self.short_edge_length[0], self.short_edge_length[1] + 1)
            if size == 0:
                return img
            scale = size * 1.0 / min(h, w)
            if h < w:
                newh, neww = size, scale * w
            else:
                newh, neww = scale * h, size
            if max(newh, neww) > self.max_size:
                scale = self.max_size * 1.0 / max(newh, neww)
                newh = newh * scale
                neww = neww * scale
            neww = int(neww + 0.5)
            newh = int(newh + 0.5)
            if img.dtype == np.uint8:
                pil_image = Image.fromarray(img)
                pil_image = pil_image.resize((neww, newh), PILImageResampling.BILINEAR)
                img = np.asarray(pil_image)
            else:
                img = img.permute(2, 0, 1).unsqueeze(0)  # hw(c) -> nchw
                img = nn.functional.interpolate(
                    img, (newh, neww), mode=self.interp_method, align_corners=False).squeeze(0)
            img_augs.append(img)
        return img_augs
class Preprocess:
    def __init__(self, cfg):
        self.aug = ResizeShortestEdge([cfg.INPUT.MIN_SIZE_TEST, cfg.INPUT.MIN_SIZE_TEST], cfg.INPUT.MAX_SIZE_TEST)
        self.input_format = cfg.INPUT.FORMAT
        self.size_divisibility = cfg.SIZE_DIVISIBILITY
        self.pad_value = cfg.PAD_VALUE
        self.max_image_size = cfg.INPUT.MAX_SIZE_TEST
        self.device = cfg.MODEL.DEVICE
        self.pixel_std = torch.tensor(cfg.MODEL.PIXEL_STD).to(self.device).view(len(cfg.MODEL.PIXEL_STD), 1, 1)
        self.pixel_mean = torch.tensor(cfg.MODEL.PIXEL_MEAN).to(self.device).view(len(cfg.MODEL.PIXEL_STD), 1, 1)
        self.normalizer = lambda x: (x - self.pixel_mean) / self.pixel_std
    def pad(self, images):
        max_size = tuple(max(s) for s in zip(*[img.shape for img in images]))
        image_sizes = [im.shape[-2:] for im in images]
        images = [
            nn.functional.pad(
                im, [0, max_size[-1] - size[1], 0, max_size[-2] - size[0]], value=self.pad_value,
            )
            for size, im in zip(image_sizes, images)
        ]
        return torch.stack(images), torch.tensor(image_sizes)
    def __call__(self, images, single_image=False):
        with torch.no_grad():
            if not isinstance(images, list):
                images = [images]
            if single_image:
                assert len(images) == 1
            for i in range(len(images)):
                if isinstance(images[i], torch.Tensor):
                    images.insert(i, images.pop(i).to(self.device).float())
                elif not isinstance(images[i], torch.Tensor):
                    images.insert(
                        i,
                        torch.as_tensor(img_tensorize(images.pop(i), input_format=self.input_format))
                        .to(self.device)
                        .float(),
                    )
            # resize smallest edge
            raw_sizes = torch.tensor([im.shape[:2] for im in images])
            images = self.aug(images)
            # transpose images and convert to torch tensors
            # images = [torch.as_tensor(i.astype("float32")).permute(2, 0, 1).to(self.device) for i in images]
            # now normalize before pad to avoid useless arithmetic
            images = [self.normalizer(x) for x in images]
            # now pad them to do the following operations
            images, sizes = self.pad(images)
            # Normalize
            if self.size_divisibility > 0:
                raise NotImplementedError()
            # pad
            scales_yx = torch.true_divide(raw_sizes, sizes)
            if single_image:
                return images[0], sizes[0], scales_yx[0]
            else:
                return images, sizes, scales_yx
def _scale_box(boxes, scale_yx):
    """Rescale xyxy boxes from resized-image space back to the raw image."""
    boxes[:, 0::2] *= scale_yx[:, 1]
    boxes[:, 1::2] *= scale_yx[:, 0]
    return boxes


def _clip_box(tensor, box_size):
    """Clamp xyxy boxes in-place to lie inside (height, width) = box_size."""
    assert torch.isfinite(tensor).all(), "Box tensor contains infinite or NaN!"
    h, w = box_size
    tensor[:, 0].clamp_(min=0, max=w)
    tensor[:, 1].clamp_(min=0, max=h)
    tensor[:, 2].clamp_(min=0, max=w)
    tensor[:, 3].clamp_(min=0, max=h)
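# Typical flow (a sketch): preprocess = Preprocess(cfg); images, sizes, scales_yx =
# preprocess(raw_images); run the detector on `images`; then _scale_box(boxes,
# scales_yx) maps predicted boxes back to raw-image coordinates and _clip_box
# trims any overshoot to the image bounds.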
| 93 |
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pathlib import Path
import torch
from ...utils import is_npu_available, is_xpu_available
from .config_args import ClusterConfig, default_json_config_file
from .config_utils import SubcommandHelpFormatter
description = 'Create a default config file for Accelerate with only a few flags set.'
def write_basic_config(mixed_precision="no", save_location: str = default_json_config_file, use_xpu: bool = False):
    """Write a default ClusterConfig JSON, inferring GPU/XPU/NPU availability."""
    path = Path(save_location)
    path.parent.mkdir(parents=True, exist_ok=True)
    if path.exists():
        print(
            f'Configuration already exists at {save_location}, will not override. Run `accelerate config` manually or pass a different `save_location`.')
        return False
    mixed_precision = mixed_precision.lower()
    if mixed_precision not in ["no", "fp16", "bf16", "fp8"]:
        raise ValueError(
            f'`mixed_precision` should be one of \'no\', \'fp16\', \'bf16\', or \'fp8\'. Received {mixed_precision}')
    config = {
        'compute_environment': 'LOCAL_MACHINE',
        'mixed_precision': mixed_precision,
    }
    if torch.cuda.is_available():
        num_gpus = torch.cuda.device_count()
        config['num_processes'] = num_gpus
        config['use_cpu'] = False
        if num_gpus > 1:
            config['distributed_type'] = 'MULTI_GPU'
        else:
            config['distributed_type'] = 'NO'
    elif is_xpu_available() and use_xpu:
        num_xpus = torch.xpu.device_count()
        config['num_processes'] = num_xpus
        config['use_cpu'] = False
        if num_xpus > 1:
            config['distributed_type'] = 'MULTI_XPU'
        else:
            config['distributed_type'] = 'NO'
    elif is_npu_available():
        num_npus = torch.npu.device_count()
        config['num_processes'] = num_npus
        config['use_cpu'] = False
        if num_npus > 1:
            config['distributed_type'] = 'MULTI_NPU'
        else:
            config['distributed_type'] = 'NO'
    else:
        num_gpus = 0
        config['use_cpu'] = True
        config['num_processes'] = 1
        config['distributed_type'] = 'NO'
    config = ClusterConfig(**config)
    config.to_json_file(path)
    return path
def default_command_parser(parser, parents):
    parser = parser.add_parser('default', parents=parents, help=description, formatter_class=SubcommandHelpFormatter)
    parser.add_argument(
        '--config_file',
        default=None,
        help=(
            'The path to use to store the config file. Will default to a file named default_config.yaml in the cache '
            'location, which is the content of the environment `HF_HOME` suffixed with \'accelerate\', or if you don\'t have '
            'such an environment variable, your cache directory (\'~/.cache\' or the content of `XDG_CACHE_HOME`) suffixed '
            'with \'huggingface\'.'
        ),
        dest='save_location',
    )
    parser.add_argument(
        '--mixed_precision',
        choices=['no', 'fp16', 'bf16'],
        type=str,
        help='Whether or not to use mixed precision training. '
        'Choose between FP16 and BF16 (bfloat16) training. '
        'BF16 training is only supported on Nvidia Ampere GPUs and PyTorch 1.10 or later.',
        default='no',
    )
    parser.set_defaults(func=default_config_command)
    return parser


def default_config_command(args):
    config_file = write_basic_config(args.mixed_precision, args.save_location)
    if config_file:
        print(f'accelerate configuration saved at {config_file}')
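# Equivalent CLI invocation (this module backs the `default` subcommand of the
# `accelerate config` command):
#
#   accelerate config default --mixed_precision fp16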
| 93 | 1 |
def solution() -> int:
    """Project Euler 40: product of the digits d_1 * d_10 * ... * d_1000000 of Champernowne's constant."""
    constant = []
    i = 1
    while len(constant) < 1e6:
        constant.append(str(i))
        i += 1
    constant = ''.join(constant)
return (
int(constant[0] )
* int(constant[9] )
* int(constant[99] )
* int(constant[999] )
* int(constant[9999] )
* int(constant[9_9999] )
* int(constant[99_9999] )
)
if __name__ == "__main__":
print(solution())
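# The indices above are 0-based, so constant[9] is d_10, constant[99] is d_100,
# and so on; the product evaluates to 1 * 1 * 5 * 3 * 7 * 2 * 1 = 210.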
| 133 |
import argparse
import os
import pickle
import sys
import torch
from transformers import TransfoXLConfig, TransfoXLLMHeadModel, load_tf_weights_in_transfo_xl
from transformers.models.transfo_xl import tokenization_transfo_xl as data_utils
from transformers.models.transfo_xl.tokenization_transfo_xl import CORPUS_NAME, VOCAB_FILES_NAMES
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
# We do this to be able to load python 2 datasets pickles
# See e.g. https://stackoverflow.com/questions/2121874/python-pickling-after-changing-a-modules-directory/2121918#2121918
data_utils.Vocab = data_utils.TransfoXLTokenizer
data_utils.Corpus = data_utils.TransfoXLCorpus
sys.modules['data_utils'] = data_utils
sys.modules['vocabulary'] = data_utils
def convert_transfo_xl_checkpoint_to_pytorch(
    tf_checkpoint_path, transfo_xl_config_file, pytorch_dump_folder_path, transfo_xl_dataset_file
):
    if transfo_xl_dataset_file:
        # Convert a pre-processed corpus (see original TensorFlow repo)
        with open(transfo_xl_dataset_file, "rb") as fp:
            corpus = pickle.load(fp, encoding="latin1")
        # Save vocabulary and dataset cache as Dictionaries (should be better than pickles for the long-term)
        pytorch_vocab_dump_path = pytorch_dump_folder_path + "/" + VOCAB_FILES_NAMES["pretrained_vocab_file"]
        print(f"Save vocabulary to {pytorch_vocab_dump_path}")
        corpus_vocab_dict = corpus.vocab.__dict__
        torch.save(corpus_vocab_dict, pytorch_vocab_dump_path)

        corpus_dict_no_vocab = corpus.__dict__
        corpus_dict_no_vocab.pop("vocab", None)
        pytorch_dataset_dump_path = pytorch_dump_folder_path + "/" + CORPUS_NAME
        print(f"Save dataset to {pytorch_dataset_dump_path}")
        torch.save(corpus_dict_no_vocab, pytorch_dataset_dump_path)

    if tf_checkpoint_path:
        # Convert a pre-trained TensorFlow model
        config_path = os.path.abspath(transfo_xl_config_file)
        tf_path = os.path.abspath(tf_checkpoint_path)
        print(f"Converting Transformer XL checkpoint from {tf_path} with config at {config_path}.")
        # Initialise PyTorch model
        if transfo_xl_config_file == "":
            config = TransfoXLConfig()
        else:
            config = TransfoXLConfig.from_json_file(transfo_xl_config_file)
        print(f"Building PyTorch model from configuration: {config}")
        model = TransfoXLLMHeadModel(config)
        model = load_tf_weights_in_transfo_xl(model, config, tf_path)
        # Save pytorch-model
        pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME)
        pytorch_config_dump_path = os.path.join(pytorch_dump_folder_path, CONFIG_NAME)
        print(f"Save PyTorch model to {os.path.abspath(pytorch_weights_dump_path)}")
        torch.save(model.state_dict(), pytorch_weights_dump_path)
        print(f"Save configuration file to {os.path.abspath(pytorch_config_dump_path)}")
        with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
            f.write(config.to_json_string())
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'--pytorch_dump_folder_path',
default=None,
type=str,
required=True,
help='Path to the folder to store the PyTorch model or dataset/vocab.',
)
parser.add_argument(
'--tf_checkpoint_path',
default='',
type=str,
help='An optional path to a TensorFlow checkpoint path to be converted.',
)
parser.add_argument(
'--transfo_xl_config_file',
default='',
type=str,
help=(
'An optional config json file corresponding to the pre-trained BERT model. \n'
'This specifies the model architecture.'
),
)
parser.add_argument(
'--transfo_xl_dataset_file',
default='',
type=str,
help='An optional dataset file to be converted in a vocabulary.',
)
    args = parser.parse_args()
convert_transfo_xl_checkpoint_to_pytorch(
args.tf_checkpoint_path,
args.transfo_xl_config_file,
args.pytorch_dump_folder_path,
args.transfo_xl_dataset_file,
)
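# A minimal invocation sketch (placeholder script name and paths, for illustration only):
#   python convert_transfo_xl_checkpoint.py \
#       --pytorch_dump_folder_path ./transfo-xl-pytorch \
#       --tf_checkpoint_path ./transfo_xl_model.ckpt \
#       --transfo_xl_config_file ./transfo_xl_config.json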
| 133 | 1 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"""vocab_file""": """sentencepiece.bpe.model"""}
PRETRAINED_VOCAB_FILES_MAP = {
    """vocab_file""": {
        """camembert-base""": """https://huggingface.co/camembert-base/resolve/main/sentencepiece.bpe.model""",
    }
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    """camembert-base""": 512,
}
SPIECE_UNDERLINE = """▁"""
class CamembertTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["""input_ids""", """attention_mask"""]
    def __init__(self, vocab_file, bos_token="<s>", eos_token="</s>", sep_token="</s>", cls_token="<s>", unk_token="<unk>", pad_token="<pad>", mask_token="<mask>", additional_special_tokens=["<s>NOTUSED", "</s>NOTUSED"], sp_model_kwargs=None, **kwargs) -> None:
        """simple docstring"""
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, cls_token=cls_token, pad_token=pad_token, mask_token=mask_token, additional_special_tokens=additional_special_tokens, sp_model_kwargs=self.sp_model_kwargs, **kwargs,
        )
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        self.vocab_file = vocab_file
        # HACK: These tokens were added by fairseq but don't seem to be actually used when duplicated in the actual
        # sentencepiece vocabulary (this is the case for <s> and </s>)
        self.fairseq_tokens_to_ids = {"<s>NOTUSED": 0, "<pad>": 1, "</s>NOTUSED": 2, "<unk>": 3}
        self.fairseq_offset = len(self.fairseq_tokens_to_ids)
        self.fairseq_tokens_to_ids["<mask>"] = len(self.sp_model) + len(self.fairseq_tokens_to_ids)
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """simple docstring"""
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep
    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        """simple docstring"""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True)
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]
    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        """simple docstring"""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
    @property
    def vocab_size(self):
        """simple docstring"""
        return len(self.fairseq_tokens_to_ids) + len(self.sp_model)
    def get_vocab(self):
        """simple docstring"""
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab
    def _tokenize(self, text):
        """simple docstring"""
        return self.sp_model.encode(text, out_type=str)
    def _convert_token_to_id(self, token):
        """simple docstring"""
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        elif self.sp_model.PieceToId(token) == 0:
            # Convert sentence piece unk token to fairseq unk token index
            return self.unk_token_id
        return self.fairseq_offset + self.sp_model.PieceToId(token)
    def _convert_id_to_token(self, index):
        """simple docstring"""
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset)
    def convert_tokens_to_string(self, tokens):
        """simple docstring"""
        current_sub_tokens = []
        out_string = ''
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()
    def __getstate__(self):
        """simple docstring"""
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state
    def __setstate__(self, d):
        """simple docstring"""
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, """sp_model_kwargs"""):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)
    def save_vocabulary(self, save_directory, filename_prefix=None):
        """simple docstring"""
        if not os.path.isdir(save_directory):
            logger.error(F"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""])
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, """wb""") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (out_vocab_file,)
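# A minimal usage sketch, assuming the "camembert-base" checkpoint is reachable:
#   tokenizer = CamembertTokenizer.from_pretrained("camembert-base")
#   ids = tokenizer("J'aime le camembert !")["input_ids"]
#   tokenizer.decode(ids)  # round-trips through the fairseq-offset id mapping above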
| 353 |
'''simple docstring'''
import importlib.util
import json
import os
import warnings
from dataclasses import dataclass, field
import torch
from ..training_args import TrainingArguments
from ..utils import cached_property, is_sagemaker_dp_enabled, logging
logger = logging.get_logger(__name__)
def is_sagemaker_model_parallel_available():
    # Get the sagemaker specific mp parameters from smp_options variable.
    smp_options = os.getenv("""SM_HP_MP_PARAMETERS""" , """{}""" )
    try:
        # Parse it and check the field "partitions" is included, it is required for model parallel.
        smp_options = json.loads(smp_options)
        if "partitions" not in smp_options:
            return False
    except json.JSONDecodeError:
        return False
    # Get the sagemaker specific framework parameters from mpi_options variable.
    mpi_options = os.getenv("""SM_FRAMEWORK_PARAMS""" , """{}""" )
    try:
        # Parse it and check the field "sagemaker_distributed_dataparallel_enabled".
        mpi_options = json.loads(mpi_options)
        if not mpi_options.get("""sagemaker_mpi_enabled""" , False):
            return False
    except json.JSONDecodeError:
        return False
    # Lastly, check if the `smdistributed` module is present.
    return importlib.util.find_spec("""smdistributed""" ) is not None
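# Illustrative environment values (hypothetical shapes; SageMaker injects the real JSON):
#   SM_HP_MP_PARAMETERS='{"partitions": 2, "microbatches": 4}'
#   SM_FRAMEWORK_PARAMS='{"sagemaker_mpi_enabled": true}'
# With both set and the `smdistributed` package importable, the check above returns True.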
if is_sagemaker_model_parallel_available():
import smdistributed.modelparallel.torch as smp
smp.init()
@dataclass
class SageMakerTrainingArguments(TrainingArguments):
    mp_parameters: str = field(
        default="""""" , metadata={"""help""": """Used by the SageMaker launcher to send mp-specific args. Ignored in SageMakerTrainer"""} , )
    def __post_init__(self):
        """simple docstring"""
        super().__post_init__()
        warnings.warn(
            """`SageMakerTrainingArguments` is deprecated and will be removed in v5 of Transformers. You can use """
            """`TrainingArguments` instead.""" , FutureWarning , )
    @cached_property
    def _setup_devices(self) -> "torch.device":
        """simple docstring"""
        logger.info("""PyTorch: setting up devices""" )
        if torch.distributed.is_available() and torch.distributed.is_initialized() and self.local_rank == -1:
            logger.warning(
                """torch.distributed process group is initialized, but local_rank == -1. """
                """In order to use Torch DDP, launch your script with `python -m torch.distributed.launch""" )
        if self.no_cuda:
            device = torch.device("""cpu""" )
            self._n_gpu = 0
        elif is_sagemaker_model_parallel_available():
            local_rank = smp.local_rank()
            device = torch.device("""cuda""" , local_rank )
            self._n_gpu = 1
        elif is_sagemaker_dp_enabled():
            import smdistributed.dataparallel.torch.torch_smddp  # noqa: F401
            torch.distributed.init_process_group(backend="""smddp""" , timeout=self.ddp_timeout_delta )
            self.local_rank = int(os.getenv("""SMDATAPARALLEL_LOCAL_RANK""" ) )
            device = torch.device("""cuda""" , self.local_rank )
            self._n_gpu = 1
        elif self.local_rank == -1:
            # if n_gpu is > 1 we'll use nn.DataParallel.
            # If you only want to use a specific subset of GPUs use `CUDA_VISIBLE_DEVICES=0`
            # Explicitly set CUDA to the first (index 0) CUDA device, otherwise `set_device` will
            # trigger an error that a device index is missing. Index 0 takes into account the
            # GPUs available in the environment, so `CUDA_VISIBLE_DEVICES=1,2` with `cuda:0`
            # will use the first GPU in that env, i.e. GPU#1
            device = torch.device("""cuda:0""" if torch.cuda.is_available() else """cpu""" )
            # Sometimes the line in the postinit has not been run before we end up here, so just checking we're not at
            # the default value.
            self._n_gpu = torch.cuda.device_count()
        else:
            # Here, we'll use torch.distributed.
            # Initializes the distributed backend which will take care of synchronizing nodes/GPUs
            if not torch.distributed.is_initialized():
                torch.distributed.init_process_group(backend="""nccl""" , timeout=self.ddp_timeout_delta )
            device = torch.device("""cuda""" , self.local_rank )
            self._n_gpu = 1
        if device.type == "cuda":
            torch.cuda.set_device(device )
        return device
    @property
    def world_size(self):
        """simple docstring"""
        if is_sagemaker_model_parallel_available():
            return smp.dp_size()
        return super().world_size
    @property
    def place_model_on_device(self):
        """simple docstring"""
        return not is_sagemaker_model_parallel_available()
    @property
    def _no_sync_in_gradient_accumulation(self):
        """simple docstring"""
        return False
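# A minimal usage sketch (hypothetical values; on SageMaker the environment supplies the real config):
#   args = SageMakerTrainingArguments(output_dir="/opt/ml/model")
#   args.device      # resolved lazily through _setup_devices above
#   args.world_size  # smp.dp_size() under model parallelism, otherwise the default world size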
| 183 | 0 |
"""simple docstring"""
test_graph_1 = {0: [2, 3], 1: [0], 2: [1], 3: [4], 4: []}
test_graph_2 = {0: [1, 2, 3], 1: [2], 2: [0], 3: [4], 4: [5], 5: [3]}
def topology_sort(graph: dict[int, list[int]], vert: int, visited: list[bool]) -> list[int]:
    """Depth-first search that returns a post-order of the vertices reachable from `vert`."""
    visited[vert] = True
    order = []
    for neighbour in graph[vert]:
        if not visited[neighbour]:
            order += topology_sort(graph, neighbour, visited)
    order.append(vert)
    return order
def find_components(reversed_graph: dict[int, list[int]], vert: int, visited: list[bool]) -> list[int]:
    """Depth-first search on the reversed graph, collecting one strongly connected component."""
    visited[vert] = True
    component = [vert]
    for neighbour in reversed_graph[vert]:
        if not visited[neighbour]:
            component += find_components(reversed_graph, neighbour, visited)
    return component
def strongly_connected_components(graph: dict[int, list[int]]) -> list[list[int]]:
    """Kosaraju's algorithm: DFS post-order on the graph, then DFS on the reversed graph in reverse order."""
    visited = len(graph) * [False]
    reversed_graph: dict[int, list[int]] = {vert: [] for vert in range(len(graph))}
    for vert, neighbours in graph.items():
        for neighbour in neighbours:
            reversed_graph[neighbour].append(vert)
    order = []
    for i, was_visited in enumerate(visited):
        if not was_visited:
            order += topology_sort(graph, i, visited)
    components_list = []
    visited = len(graph) * [False]
    for i in range(len(graph)):
        vert = order[len(graph) - i - 1]
        if not visited[vert]:
            component = find_components(reversed_graph, vert, visited)
            components_list.append(component)
    return components_list
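# Usage with the sample graphs defined above:
#   strongly_connected_components(test_graph_1)  # groups {0, 1, 2}; {3} and {4} are singletons
#   strongly_connected_components(test_graph_2)  # two components: {0, 1, 2} and {3, 4, 5}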
| 238 |
"""simple docstring"""
import argparse
import torch
from transformers import GPT2Config, GPT2Model, load_tf_weights_in_gpt2
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def convert_gpt2_checkpoint_to_pytorch(gpt2_checkpoint_path, gpt2_config_file, pytorch_dump_folder_path):
    """simple docstring"""
    # Construct model
    if gpt2_config_file == "":
        config = GPT2Config()
    else:
        config = GPT2Config.from_json_file(gpt2_config_file)
    model = GPT2Model(config)
    # Load weights from numpy
    load_tf_weights_in_gpt2(model, config, gpt2_checkpoint_path)
    # Save pytorch-model
    pytorch_weights_dump_path = pytorch_dump_folder_path + '''/''' + WEIGHTS_NAME
    pytorch_config_dump_path = pytorch_dump_folder_path + '''/''' + CONFIG_NAME
    print(f'''Save PyTorch model to {pytorch_weights_dump_path}''' )
    torch.save(model.state_dict() , pytorch_weights_dump_path)
    print(f'''Save configuration file to {pytorch_config_dump_path}''' )
    with open(pytorch_config_dump_path , '''w''' , encoding='''utf-8''' ) as f:
        f.write(config.to_json_string() )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--gpt2_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    parser.add_argument(
        "--gpt2_config_file",
        default="",
        type=str,
        help=(
            "An optional config json file corresponding to the pre-trained OpenAI model. \n"
            "This specifies the model architecture."
        ),
    )
    args = parser.parse_args()
    convert_gpt2_checkpoint_to_pytorch(args.gpt2_checkpoint_path, args.gpt2_config_file, args.pytorch_dump_folder_path)
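# A minimal invocation sketch (placeholder script name and paths, for illustration only):
#   python convert_gpt2_checkpoint.py \
#       --gpt2_checkpoint_path ./gpt2/model.ckpt \
#       --pytorch_dump_folder_path ./gpt2-pytorch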
| 238 | 1 |
"""simple docstring"""
from transformers import BertTokenizer, EncoderDecoderModel, Seq2SeqTrainer, Seq2SeqTrainingArguments
from transformers.testing_utils import TestCasePlus, require_torch, slow
from transformers.utils import is_datasets_available
if is_datasets_available():
    import datasets
class Seq2seqTrainerTester(TestCasePlus):
    @slow
    @require_torch
    def test_finetune_bert2bert(self):
        """simple docstring"""
        bert2bert = EncoderDecoderModel.from_encoder_decoder_pretrained('''prajjwal1/bert-tiny''' , '''prajjwal1/bert-tiny''' )
        tokenizer = BertTokenizer.from_pretrained('''bert-base-uncased''' )
        bert2bert.config.vocab_size = bert2bert.config.encoder.vocab_size
        bert2bert.config.eos_token_id = tokenizer.sep_token_id
        bert2bert.config.decoder_start_token_id = tokenizer.cls_token_id
        bert2bert.config.max_length = 128
        train_dataset = datasets.load_dataset('''cnn_dailymail''' , '''3.0.0''' , split='''train[:1%]''' )
        val_dataset = datasets.load_dataset('''cnn_dailymail''' , '''3.0.0''' , split='''validation[:1%]''' )
        train_dataset = train_dataset.select(range(32) )
        val_dataset = val_dataset.select(range(16) )
        batch_size = 4
        def _map_to_encoder_decoder_inputs(batch):
            # Tokenizer will automatically set [BOS] <text> [EOS]
            inputs = tokenizer(batch['''article'''] , padding='''max_length''' , truncation=True , max_length=512 )
            outputs = tokenizer(batch['''highlights'''] , padding='''max_length''' , truncation=True , max_length=128 )
            batch['''input_ids'''] = inputs.input_ids
            batch['''attention_mask'''] = inputs.attention_mask
            batch['''decoder_input_ids'''] = outputs.input_ids
            batch['''labels'''] = outputs.input_ids.copy()
            # -100 is the ignore_index of PyTorch's CrossEntropyLoss, so padding positions
            # do not contribute to the training loss.
            batch['''labels'''] = [
                [-100 if token == tokenizer.pad_token_id else token for token in labels] for labels in batch['''labels''']
            ]
            batch['''decoder_attention_mask'''] = outputs.attention_mask
            assert all(len(x) == 512 for x in inputs.input_ids )
            assert all(len(x) == 128 for x in outputs.input_ids )
            return batch
        def _compute_metrics(pred):
            labels_ids = pred.label_ids
            pred_ids = pred.predictions
            # all unnecessary tokens are removed
            pred_str = tokenizer.batch_decode(pred_ids , skip_special_tokens=True )
            label_str = tokenizer.batch_decode(labels_ids , skip_special_tokens=True )
            accuracy = sum([int(pred_str[i] == label_str[i]) for i in range(len(pred_str) )] ) / len(pred_str )
            return {"accuracy": accuracy}
        # map train dataset
        train_dataset = train_dataset.map(
            _map_to_encoder_decoder_inputs , batched=True , batch_size=batch_size , remove_columns=['''article''', '''highlights'''] , )
        train_dataset.set_format(
            type='''torch''' , columns=['''input_ids''', '''attention_mask''', '''decoder_input_ids''', '''decoder_attention_mask''', '''labels'''] , )
        # same for validation dataset
        val_dataset = val_dataset.map(
            _map_to_encoder_decoder_inputs , batched=True , batch_size=batch_size , remove_columns=['''article''', '''highlights'''] , )
        val_dataset.set_format(
            type='''torch''' , columns=['''input_ids''', '''attention_mask''', '''decoder_input_ids''', '''decoder_attention_mask''', '''labels'''] , )
        output_dir = self.get_auto_remove_tmp_dir()
        training_args = Seq2SeqTrainingArguments(
            output_dir=output_dir , per_device_train_batch_size=batch_size , per_device_eval_batch_size=batch_size , predict_with_generate=True , evaluation_strategy='''steps''' , do_train=True , do_eval=True , warmup_steps=0 , eval_steps=2 , logging_steps=2 , )
        # instantiate trainer
        trainer = Seq2SeqTrainer(
            model=bert2bert , args=training_args , compute_metrics=_compute_metrics , train_dataset=train_dataset , eval_dataset=val_dataset , tokenizer=tokenizer , )
        # start training
        trainer.train()
| 239 |
"""simple docstring"""
def capitalize_variants(txt: str) -> list:
    """Return one copy of `txt` per alphabetic position, with that character uppercased."""
    return [
        txt[:a] + txt[a].upper() + txt[a + 1 :]
        for a in range(len(txt))
        if txt[a].isalpha()
    ]
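# Example: capitalize_variants("a1b") -> ["A1b", "a1B"]  (one variant per alphabetic character)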
if __name__ == "__main__":
__import__("doctest").testmod() | 239 | 1 |
from ..utils import DummyObject, requires_backends
class _UpperCamelCase ( metaclass=_UpperCAmelCase ):
"""simple docstring"""
__a : List[Any] = ['''torch''']
def __init__( self , *lowerCAmelCase__ , **lowerCAmelCase__ ) -> List[str]:
'''simple docstring'''
requires_backends(self , ['''torch'''] )
@classmethod
def _SCREAMING_SNAKE_CASE ( cls , *lowerCAmelCase__ , **lowerCAmelCase__ ) -> str:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
@classmethod
def _SCREAMING_SNAKE_CASE ( cls , *lowerCAmelCase__ , **lowerCAmelCase__ ) -> str:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
class _UpperCamelCase ( metaclass=_UpperCAmelCase ):
"""simple docstring"""
__a : Any = ['''torch''']
def __init__( self , *lowerCAmelCase__ , **lowerCAmelCase__ ) -> str:
'''simple docstring'''
requires_backends(self , ['''torch'''] )
@classmethod
def _SCREAMING_SNAKE_CASE ( cls , *lowerCAmelCase__ , **lowerCAmelCase__ ) -> Union[str, Any]:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
@classmethod
def _SCREAMING_SNAKE_CASE ( cls , *lowerCAmelCase__ , **lowerCAmelCase__ ) -> List[str]:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
class _UpperCamelCase ( metaclass=_UpperCAmelCase ):
"""simple docstring"""
__a : str = ['''torch''']
def __init__( self , *lowerCAmelCase__ , **lowerCAmelCase__ ) -> List[str]:
'''simple docstring'''
requires_backends(self , ['''torch'''] )
@classmethod
def _SCREAMING_SNAKE_CASE ( cls , *lowerCAmelCase__ , **lowerCAmelCase__ ) -> Tuple:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
@classmethod
def _SCREAMING_SNAKE_CASE ( cls , *lowerCAmelCase__ , **lowerCAmelCase__ ) -> Tuple:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
class _UpperCamelCase ( metaclass=_UpperCAmelCase ):
"""simple docstring"""
__a : Optional[Any] = ['''torch''']
def __init__( self , *lowerCAmelCase__ , **lowerCAmelCase__ ) -> List[str]:
'''simple docstring'''
requires_backends(self , ['''torch'''] )
@classmethod
def _SCREAMING_SNAKE_CASE ( cls , *lowerCAmelCase__ , **lowerCAmelCase__ ) -> Optional[int]:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
@classmethod
def _SCREAMING_SNAKE_CASE ( cls , *lowerCAmelCase__ , **lowerCAmelCase__ ) -> Tuple:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
class _UpperCamelCase ( metaclass=_UpperCAmelCase ):
"""simple docstring"""
__a : Optional[Any] = ['''torch''']
def __init__( self , *lowerCAmelCase__ , **lowerCAmelCase__ ) -> str:
'''simple docstring'''
requires_backends(self , ['''torch'''] )
@classmethod
def _SCREAMING_SNAKE_CASE ( cls , *lowerCAmelCase__ , **lowerCAmelCase__ ) -> Optional[int]:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
@classmethod
def _SCREAMING_SNAKE_CASE ( cls , *lowerCAmelCase__ , **lowerCAmelCase__ ) -> int:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
class _UpperCamelCase ( metaclass=_UpperCAmelCase ):
"""simple docstring"""
__a : int = ['''torch''']
def __init__( self , *lowerCAmelCase__ , **lowerCAmelCase__ ) -> Any:
'''simple docstring'''
requires_backends(self , ['''torch'''] )
@classmethod
def _SCREAMING_SNAKE_CASE ( cls , *lowerCAmelCase__ , **lowerCAmelCase__ ) -> Dict:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
@classmethod
def _SCREAMING_SNAKE_CASE ( cls , *lowerCAmelCase__ , **lowerCAmelCase__ ) -> Tuple:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
class _UpperCamelCase ( metaclass=_UpperCAmelCase ):
"""simple docstring"""
__a : Any = ['''torch''']
def __init__( self , *lowerCAmelCase__ , **lowerCAmelCase__ ) -> Union[str, Any]:
'''simple docstring'''
requires_backends(self , ['''torch'''] )
@classmethod
def _SCREAMING_SNAKE_CASE ( cls , *lowerCAmelCase__ , **lowerCAmelCase__ ) -> Tuple:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
@classmethod
def _SCREAMING_SNAKE_CASE ( cls , *lowerCAmelCase__ , **lowerCAmelCase__ ) -> Optional[int]:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
class _UpperCamelCase ( metaclass=_UpperCAmelCase ):
"""simple docstring"""
__a : int = ['''torch''']
def __init__( self , *lowerCAmelCase__ , **lowerCAmelCase__ ) -> int:
'''simple docstring'''
requires_backends(self , ['''torch'''] )
@classmethod
def _SCREAMING_SNAKE_CASE ( cls , *lowerCAmelCase__ , **lowerCAmelCase__ ) -> List[Any]:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
@classmethod
def _SCREAMING_SNAKE_CASE ( cls , *lowerCAmelCase__ , **lowerCAmelCase__ ) -> Tuple:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
class _UpperCamelCase ( metaclass=_UpperCAmelCase ):
"""simple docstring"""
__a : str = ['''torch''']
def __init__( self , *lowerCAmelCase__ , **lowerCAmelCase__ ) -> Optional[Any]:
'''simple docstring'''
requires_backends(self , ['''torch'''] )
@classmethod
def _SCREAMING_SNAKE_CASE ( cls , *lowerCAmelCase__ , **lowerCAmelCase__ ) -> Any:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
@classmethod
def _SCREAMING_SNAKE_CASE ( cls , *lowerCAmelCase__ , **lowerCAmelCase__ ) -> Union[str, Any]:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
class _UpperCamelCase ( metaclass=_UpperCAmelCase ):
"""simple docstring"""
__a : Optional[int] = ['''torch''']
def __init__( self , *lowerCAmelCase__ , **lowerCAmelCase__ ) -> Optional[Any]:
'''simple docstring'''
requires_backends(self , ['''torch'''] )
@classmethod
def _SCREAMING_SNAKE_CASE ( cls , *lowerCAmelCase__ , **lowerCAmelCase__ ) -> str:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
@classmethod
def _SCREAMING_SNAKE_CASE ( cls , *lowerCAmelCase__ , **lowerCAmelCase__ ) -> str:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
class _UpperCamelCase ( metaclass=_UpperCAmelCase ):
"""simple docstring"""
__a : Tuple = ['''torch''']
def __init__( self , *lowerCAmelCase__ , **lowerCAmelCase__ ) -> Optional[Any]:
'''simple docstring'''
requires_backends(self , ['''torch'''] )
@classmethod
def _SCREAMING_SNAKE_CASE ( cls , *lowerCAmelCase__ , **lowerCAmelCase__ ) -> Union[str, Any]:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
@classmethod
def _SCREAMING_SNAKE_CASE ( cls , *lowerCAmelCase__ , **lowerCAmelCase__ ) -> Tuple:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
def UpperCAmelCase ( *lowercase , **lowercase ):
"""simple docstring"""
requires_backends(lowercase , ['''torch'''] )
def UpperCAmelCase ( *lowercase , **lowercase ):
"""simple docstring"""
requires_backends(lowercase , ['''torch'''] )
def UpperCAmelCase ( *lowercase , **lowercase ):
"""simple docstring"""
requires_backends(lowercase , ['''torch'''] )
def UpperCAmelCase ( *lowercase , **lowercase ):
"""simple docstring"""
requires_backends(lowercase , ['''torch'''] )
def UpperCAmelCase ( *lowercase , **lowercase ):
"""simple docstring"""
requires_backends(lowercase , ['''torch'''] )
def UpperCAmelCase ( *lowercase , **lowercase ):
"""simple docstring"""
requires_backends(lowercase , ['''torch'''] )
def UpperCAmelCase ( *lowercase , **lowercase ):
"""simple docstring"""
requires_backends(lowercase , ['''torch'''] )
class _UpperCamelCase ( metaclass=_UpperCAmelCase ):
"""simple docstring"""
__a : Optional[Any] = ['''torch''']
def __init__( self , *lowerCAmelCase__ , **lowerCAmelCase__ ) -> Optional[Any]:
'''simple docstring'''
requires_backends(self , ['''torch'''] )
@classmethod
def _SCREAMING_SNAKE_CASE ( cls , *lowerCAmelCase__ , **lowerCAmelCase__ ) -> List[str]:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
@classmethod
def _SCREAMING_SNAKE_CASE ( cls , *lowerCAmelCase__ , **lowerCAmelCase__ ) -> List[Any]:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
class _UpperCamelCase ( metaclass=_UpperCAmelCase ):
"""simple docstring"""
__a : List[Any] = ['''torch''']
def __init__( self , *lowerCAmelCase__ , **lowerCAmelCase__ ) -> Any:
'''simple docstring'''
requires_backends(self , ['''torch'''] )
@classmethod
def _SCREAMING_SNAKE_CASE ( cls , *lowerCAmelCase__ , **lowerCAmelCase__ ) -> List[Any]:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
@classmethod
def _SCREAMING_SNAKE_CASE ( cls , *lowerCAmelCase__ , **lowerCAmelCase__ ) -> Any:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
class _UpperCamelCase ( metaclass=_UpperCAmelCase ):
"""simple docstring"""
__a : Optional[int] = ['''torch''']
def __init__( self , *lowerCAmelCase__ , **lowerCAmelCase__ ) -> Optional[int]:
'''simple docstring'''
requires_backends(self , ['''torch'''] )
@classmethod
def _SCREAMING_SNAKE_CASE ( cls , *lowerCAmelCase__ , **lowerCAmelCase__ ) -> Optional[int]:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
@classmethod
def _SCREAMING_SNAKE_CASE ( cls , *lowerCAmelCase__ , **lowerCAmelCase__ ) -> Tuple:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
class _UpperCamelCase ( metaclass=_UpperCAmelCase ):
"""simple docstring"""
__a : List[Any] = ['''torch''']
def __init__( self , *lowerCAmelCase__ , **lowerCAmelCase__ ) -> Optional[Any]:
'''simple docstring'''
requires_backends(self , ['''torch'''] )
@classmethod
def _SCREAMING_SNAKE_CASE ( cls , *lowerCAmelCase__ , **lowerCAmelCase__ ) -> Optional[Any]:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
@classmethod
def _SCREAMING_SNAKE_CASE ( cls , *lowerCAmelCase__ , **lowerCAmelCase__ ) -> Any:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
class _UpperCamelCase ( metaclass=_UpperCAmelCase ):
"""simple docstring"""
__a : str = ['''torch''']
def __init__( self , *lowerCAmelCase__ , **lowerCAmelCase__ ) -> Dict:
'''simple docstring'''
requires_backends(self , ['''torch'''] )
@classmethod
def _SCREAMING_SNAKE_CASE ( cls , *lowerCAmelCase__ , **lowerCAmelCase__ ) -> List[str]:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
@classmethod
def _SCREAMING_SNAKE_CASE ( cls , *lowerCAmelCase__ , **lowerCAmelCase__ ) -> Optional[int]:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
class _UpperCamelCase ( metaclass=_UpperCAmelCase ):
"""simple docstring"""
__a : str = ['''torch''']
def __init__( self , *lowerCAmelCase__ , **lowerCAmelCase__ ) -> str:
'''simple docstring'''
requires_backends(self , ['''torch'''] )
@classmethod
def _SCREAMING_SNAKE_CASE ( cls , *lowerCAmelCase__ , **lowerCAmelCase__ ) -> List[str]:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
@classmethod
def _SCREAMING_SNAKE_CASE ( cls , *lowerCAmelCase__ , **lowerCAmelCase__ ) -> Tuple:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
class _UpperCamelCase ( metaclass=_UpperCAmelCase ):
"""simple docstring"""
__a : Optional[int] = ['''torch''']
def __init__( self , *lowerCAmelCase__ , **lowerCAmelCase__ ) -> str:
'''simple docstring'''
requires_backends(self , ['''torch'''] )
@classmethod
def _SCREAMING_SNAKE_CASE ( cls , *lowerCAmelCase__ , **lowerCAmelCase__ ) -> Dict:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
@classmethod
def _SCREAMING_SNAKE_CASE ( cls , *lowerCAmelCase__ , **lowerCAmelCase__ ) -> Any:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
class _UpperCamelCase ( metaclass=_UpperCAmelCase ):
"""simple docstring"""
__a : int = ['''torch''']
def __init__( self , *lowerCAmelCase__ , **lowerCAmelCase__ ) -> int:
'''simple docstring'''
requires_backends(self , ['''torch'''] )
@classmethod
def _SCREAMING_SNAKE_CASE ( cls , *lowerCAmelCase__ , **lowerCAmelCase__ ) -> Dict:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
@classmethod
def _SCREAMING_SNAKE_CASE ( cls , *lowerCAmelCase__ , **lowerCAmelCase__ ) -> Any:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
class _UpperCamelCase ( metaclass=_UpperCAmelCase ):
"""simple docstring"""
__a : Optional[int] = ['''torch''']
def __init__( self , *lowerCAmelCase__ , **lowerCAmelCase__ ) -> List[str]:
'''simple docstring'''
requires_backends(self , ['''torch'''] )
@classmethod
def _SCREAMING_SNAKE_CASE ( cls , *lowerCAmelCase__ , **lowerCAmelCase__ ) -> Dict:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
@classmethod
def _SCREAMING_SNAKE_CASE ( cls , *lowerCAmelCase__ , **lowerCAmelCase__ ) -> Optional[Any]:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
class _UpperCamelCase ( metaclass=_UpperCAmelCase ):
"""simple docstring"""
__a : Optional[Any] = ['''torch''']
def __init__( self , *lowerCAmelCase__ , **lowerCAmelCase__ ) -> List[str]:
'''simple docstring'''
requires_backends(self , ['''torch'''] )
@classmethod
def _SCREAMING_SNAKE_CASE ( cls , *lowerCAmelCase__ , **lowerCAmelCase__ ) -> Union[str, Any]:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
@classmethod
def _SCREAMING_SNAKE_CASE ( cls , *lowerCAmelCase__ , **lowerCAmelCase__ ) -> Union[str, Any]:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
class _UpperCamelCase ( metaclass=_UpperCAmelCase ):
"""simple docstring"""
__a : Optional[int] = ['''torch''']
def __init__( self , *lowerCAmelCase__ , **lowerCAmelCase__ ) -> List[str]:
'''simple docstring'''
requires_backends(self , ['''torch'''] )
@classmethod
def _SCREAMING_SNAKE_CASE ( cls , *lowerCAmelCase__ , **lowerCAmelCase__ ) -> int:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
@classmethod
def _SCREAMING_SNAKE_CASE ( cls , *lowerCAmelCase__ , **lowerCAmelCase__ ) -> Dict:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
class _UpperCamelCase ( metaclass=_UpperCAmelCase ):
"""simple docstring"""
__a : Optional[int] = ['''torch''']
def __init__( self , *lowerCAmelCase__ , **lowerCAmelCase__ ) -> str:
'''simple docstring'''
requires_backends(self , ['''torch'''] )
@classmethod
def _SCREAMING_SNAKE_CASE ( cls , *lowerCAmelCase__ , **lowerCAmelCase__ ) -> List[Any]:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
@classmethod
def _SCREAMING_SNAKE_CASE ( cls , *lowerCAmelCase__ , **lowerCAmelCase__ ) -> Optional[int]:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
class _UpperCamelCase ( metaclass=_UpperCAmelCase ):
"""simple docstring"""
__a : Dict = ['''torch''']
def __init__( self , *lowerCAmelCase__ , **lowerCAmelCase__ ) -> List[str]:
'''simple docstring'''
requires_backends(self , ['''torch'''] )
@classmethod
def _SCREAMING_SNAKE_CASE ( cls , *lowerCAmelCase__ , **lowerCAmelCase__ ) -> Union[str, Any]:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
@classmethod
def _SCREAMING_SNAKE_CASE ( cls , *lowerCAmelCase__ , **lowerCAmelCase__ ) -> Any:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
class _UpperCamelCase ( metaclass=_UpperCAmelCase ):
"""simple docstring"""
__a : str = ['''torch''']
def __init__( self , *lowerCAmelCase__ , **lowerCAmelCase__ ) -> int:
'''simple docstring'''
requires_backends(self , ['''torch'''] )
@classmethod
def _SCREAMING_SNAKE_CASE ( cls , *lowerCAmelCase__ , **lowerCAmelCase__ ) -> Tuple:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
@classmethod
def _SCREAMING_SNAKE_CASE ( cls , *lowerCAmelCase__ , **lowerCAmelCase__ ) -> Any:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
class _UpperCamelCase ( metaclass=_UpperCAmelCase ):
"""simple docstring"""
__a : Any = ['''torch''']
def __init__( self , *lowerCAmelCase__ , **lowerCAmelCase__ ) -> Dict:
'''simple docstring'''
requires_backends(self , ['''torch'''] )
@classmethod
def _SCREAMING_SNAKE_CASE ( cls , *lowerCAmelCase__ , **lowerCAmelCase__ ) -> Tuple:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
@classmethod
def _SCREAMING_SNAKE_CASE ( cls , *lowerCAmelCase__ , **lowerCAmelCase__ ) -> Union[str, Any]:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
class _UpperCamelCase ( metaclass=_UpperCAmelCase ):
"""simple docstring"""
__a : Any = ['''torch''']
def __init__( self , *lowerCAmelCase__ , **lowerCAmelCase__ ) -> str:
'''simple docstring'''
requires_backends(self , ['''torch'''] )
@classmethod
def _SCREAMING_SNAKE_CASE ( cls , *lowerCAmelCase__ , **lowerCAmelCase__ ) -> Optional[int]:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
@classmethod
def _SCREAMING_SNAKE_CASE ( cls , *lowerCAmelCase__ , **lowerCAmelCase__ ) -> Any:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
class _UpperCamelCase ( metaclass=_UpperCAmelCase ):
"""simple docstring"""
__a : Union[str, Any] = ['''torch''']
def __init__( self , *lowerCAmelCase__ , **lowerCAmelCase__ ) -> Optional[Any]:
'''simple docstring'''
requires_backends(self , ['''torch'''] )
@classmethod
def _SCREAMING_SNAKE_CASE ( cls , *lowerCAmelCase__ , **lowerCAmelCase__ ) -> Dict:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
@classmethod
def _SCREAMING_SNAKE_CASE ( cls , *lowerCAmelCase__ , **lowerCAmelCase__ ) -> Any:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
class _UpperCamelCase ( metaclass=_UpperCAmelCase ):
"""simple docstring"""
__a : List[str] = ['''torch''']
def __init__( self , *lowerCAmelCase__ , **lowerCAmelCase__ ) -> List[Any]:
'''simple docstring'''
requires_backends(self , ['''torch'''] )
@classmethod
def _SCREAMING_SNAKE_CASE ( cls , *lowerCAmelCase__ , **lowerCAmelCase__ ) -> str:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
@classmethod
def _SCREAMING_SNAKE_CASE ( cls , *lowerCAmelCase__ , **lowerCAmelCase__ ) -> Tuple:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
class _UpperCamelCase ( metaclass=_UpperCAmelCase ):
"""simple docstring"""
__a : List[Any] = ['''torch''']
def __init__( self , *lowerCAmelCase__ , **lowerCAmelCase__ ) -> List[Any]:
'''simple docstring'''
requires_backends(self , ['''torch'''] )
@classmethod
def _SCREAMING_SNAKE_CASE ( cls , *lowerCAmelCase__ , **lowerCAmelCase__ ) -> Optional[int]:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
@classmethod
def _SCREAMING_SNAKE_CASE ( cls , *lowerCAmelCase__ , **lowerCAmelCase__ ) -> int:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
class _UpperCamelCase ( metaclass=_UpperCAmelCase ):
"""simple docstring"""
__a : List[str] = ['''torch''']
def __init__( self , *lowerCAmelCase__ , **lowerCAmelCase__ ) -> List[Any]:
'''simple docstring'''
requires_backends(self , ['''torch'''] )
@classmethod
def _SCREAMING_SNAKE_CASE ( cls , *lowerCAmelCase__ , **lowerCAmelCase__ ) -> Any:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
@classmethod
def _SCREAMING_SNAKE_CASE ( cls , *lowerCAmelCase__ , **lowerCAmelCase__ ) -> Optional[Any]:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
class _UpperCamelCase ( metaclass=_UpperCAmelCase ):
"""simple docstring"""
__a : Any = ['''torch''']
def __init__( self , *lowerCAmelCase__ , **lowerCAmelCase__ ) -> Union[str, Any]:
'''simple docstring'''
requires_backends(self , ['''torch'''] )
@classmethod
def _SCREAMING_SNAKE_CASE ( cls , *lowerCAmelCase__ , **lowerCAmelCase__ ) -> int:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
@classmethod
def _SCREAMING_SNAKE_CASE ( cls , *lowerCAmelCase__ , **lowerCAmelCase__ ) -> Optional[Any]:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
class _UpperCamelCase ( metaclass=_UpperCAmelCase ):
"""simple docstring"""
__a : Tuple = ['''torch''']
def __init__( self , *lowerCAmelCase__ , **lowerCAmelCase__ ) -> List[Any]:
'''simple docstring'''
requires_backends(self , ['''torch'''] )
@classmethod
def _SCREAMING_SNAKE_CASE ( cls , *lowerCAmelCase__ , **lowerCAmelCase__ ) -> str:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
@classmethod
def _SCREAMING_SNAKE_CASE ( cls , *lowerCAmelCase__ , **lowerCAmelCase__ ) -> Any:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
class _UpperCamelCase ( metaclass=_UpperCAmelCase ):
"""simple docstring"""
__a : Dict = ['''torch''']
def __init__( self , *lowerCAmelCase__ , **lowerCAmelCase__ ) -> int:
'''simple docstring'''
requires_backends(self , ['''torch'''] )
@classmethod
def _SCREAMING_SNAKE_CASE ( cls , *lowerCAmelCase__ , **lowerCAmelCase__ ) -> List[str]:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
@classmethod
def _SCREAMING_SNAKE_CASE ( cls , *lowerCAmelCase__ , **lowerCAmelCase__ ) -> Union[str, Any]:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
class _UpperCamelCase ( metaclass=_UpperCAmelCase ):
"""simple docstring"""
__a : Optional[int] = ['''torch''']
def __init__( self , *lowerCAmelCase__ , **lowerCAmelCase__ ) -> Optional[Any]:
'''simple docstring'''
requires_backends(self , ['''torch'''] )
@classmethod
def _SCREAMING_SNAKE_CASE ( cls , *lowerCAmelCase__ , **lowerCAmelCase__ ) -> Optional[int]:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
@classmethod
def _SCREAMING_SNAKE_CASE ( cls , *lowerCAmelCase__ , **lowerCAmelCase__ ) -> str:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
class _UpperCamelCase ( metaclass=_UpperCAmelCase ):
"""simple docstring"""
__a : Any = ['''torch''']
def __init__( self , *lowerCAmelCase__ , **lowerCAmelCase__ ) -> str:
'''simple docstring'''
requires_backends(self , ['''torch'''] )
@classmethod
def _SCREAMING_SNAKE_CASE ( cls , *lowerCAmelCase__ , **lowerCAmelCase__ ) -> List[str]:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
@classmethod
def _SCREAMING_SNAKE_CASE ( cls , *lowerCAmelCase__ , **lowerCAmelCase__ ) -> Optional[Any]:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
class _UpperCamelCase ( metaclass=_UpperCAmelCase ):
"""simple docstring"""
__a : Optional[Any] = ['''torch''']
def __init__( self , *lowerCAmelCase__ , **lowerCAmelCase__ ) -> Optional[int]:
'''simple docstring'''
requires_backends(self , ['''torch'''] )
@classmethod
def _SCREAMING_SNAKE_CASE ( cls , *lowerCAmelCase__ , **lowerCAmelCase__ ) -> List[str]:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
@classmethod
def _SCREAMING_SNAKE_CASE ( cls , *lowerCAmelCase__ , **lowerCAmelCase__ ) -> Union[str, Any]:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
class _UpperCamelCase ( metaclass=_UpperCAmelCase ):
"""simple docstring"""
__a : List[Any] = ['''torch''']
def __init__( self , *lowerCAmelCase__ , **lowerCAmelCase__ ) -> Tuple:
'''simple docstring'''
requires_backends(self , ['''torch'''] )
@classmethod
def _SCREAMING_SNAKE_CASE ( cls , *lowerCAmelCase__ , **lowerCAmelCase__ ) -> Tuple:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
@classmethod
def _SCREAMING_SNAKE_CASE ( cls , *lowerCAmelCase__ , **lowerCAmelCase__ ) -> Dict:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
class _UpperCamelCase ( metaclass=_UpperCAmelCase ):
"""simple docstring"""
__a : Optional[int] = ['''torch''']
def __init__( self , *lowerCAmelCase__ , **lowerCAmelCase__ ) -> Any:
'''simple docstring'''
requires_backends(self , ['''torch'''] )
@classmethod
def _SCREAMING_SNAKE_CASE ( cls , *lowerCAmelCase__ , **lowerCAmelCase__ ) -> List[Any]:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
@classmethod
def _SCREAMING_SNAKE_CASE ( cls , *lowerCAmelCase__ , **lowerCAmelCase__ ) -> int:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
class _UpperCamelCase ( metaclass=_UpperCAmelCase ):
"""simple docstring"""
__a : List[Any] = ['''torch''']
def __init__( self , *lowerCAmelCase__ , **lowerCAmelCase__ ) -> List[str]:
'''simple docstring'''
requires_backends(self , ['''torch'''] )
@classmethod
def _SCREAMING_SNAKE_CASE ( cls , *lowerCAmelCase__ , **lowerCAmelCase__ ) -> str:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
@classmethod
def _SCREAMING_SNAKE_CASE ( cls , *lowerCAmelCase__ , **lowerCAmelCase__ ) -> Optional[int]:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
class _UpperCamelCase ( metaclass=_UpperCAmelCase ):
"""simple docstring"""
__a : Optional[int] = ['''torch''']
def __init__( self , *lowerCAmelCase__ , **lowerCAmelCase__ ) -> Any:
'''simple docstring'''
requires_backends(self , ['''torch'''] )
@classmethod
def _SCREAMING_SNAKE_CASE ( cls , *lowerCAmelCase__ , **lowerCAmelCase__ ) -> List[Any]:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
@classmethod
def _SCREAMING_SNAKE_CASE ( cls , *lowerCAmelCase__ , **lowerCAmelCase__ ) -> Tuple:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
class _UpperCamelCase ( metaclass=_UpperCAmelCase ):
"""simple docstring"""
__a : str = ['''torch''']
def __init__( self , *lowerCAmelCase__ , **lowerCAmelCase__ ) -> Optional[int]:
'''simple docstring'''
requires_backends(self , ['''torch'''] )
@classmethod
def _SCREAMING_SNAKE_CASE ( cls , *lowerCAmelCase__ , **lowerCAmelCase__ ) -> List[Any]:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
@classmethod
def _SCREAMING_SNAKE_CASE ( cls , *lowerCAmelCase__ , **lowerCAmelCase__ ) -> Tuple:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
class _UpperCamelCase ( metaclass=_UpperCAmelCase ):
"""simple docstring"""
__a : Tuple = ['''torch''']
def __init__( self , *lowerCAmelCase__ , **lowerCAmelCase__ ) -> List[Any]:
'''simple docstring'''
requires_backends(self , ['''torch'''] )
@classmethod
def _SCREAMING_SNAKE_CASE ( cls , *lowerCAmelCase__ , **lowerCAmelCase__ ) -> Optional[int]:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
@classmethod
def _SCREAMING_SNAKE_CASE ( cls , *lowerCAmelCase__ , **lowerCAmelCase__ ) -> int:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
class _UpperCamelCase ( metaclass=_UpperCAmelCase ):
"""simple docstring"""
__a : Optional[int] = ['''torch''']
def __init__( self , *lowerCAmelCase__ , **lowerCAmelCase__ ) -> Tuple:
'''simple docstring'''
requires_backends(self , ['''torch'''] )
@classmethod
def _SCREAMING_SNAKE_CASE ( cls , *lowerCAmelCase__ , **lowerCAmelCase__ ) -> Any:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
@classmethod
def _SCREAMING_SNAKE_CASE ( cls , *lowerCAmelCase__ , **lowerCAmelCase__ ) -> Union[str, Any]:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
class _UpperCamelCase ( metaclass=_UpperCAmelCase ):
"""simple docstring"""
__a : Any = ['''torch''']
def __init__( self , *lowerCAmelCase__ , **lowerCAmelCase__ ) -> Any:
'''simple docstring'''
requires_backends(self , ['''torch'''] )
@classmethod
def _SCREAMING_SNAKE_CASE ( cls , *lowerCAmelCase__ , **lowerCAmelCase__ ) -> str:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
@classmethod
def _SCREAMING_SNAKE_CASE ( cls , *lowerCAmelCase__ , **lowerCAmelCase__ ) -> str:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
class _UpperCamelCase ( metaclass=_UpperCAmelCase ):
"""simple docstring"""
__a : Dict = ['''torch''']
def __init__( self , *lowerCAmelCase__ , **lowerCAmelCase__ ) -> Dict:
'''simple docstring'''
requires_backends(self , ['''torch'''] )
@classmethod
def _SCREAMING_SNAKE_CASE ( cls , *lowerCAmelCase__ , **lowerCAmelCase__ ) -> Optional[Any]:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
@classmethod
def _SCREAMING_SNAKE_CASE ( cls , *lowerCAmelCase__ , **lowerCAmelCase__ ) -> Tuple:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
class _UpperCamelCase ( metaclass=_UpperCAmelCase ):
"""simple docstring"""
__a : List[str] = ['''torch''']
def __init__( self , *lowerCAmelCase__ , **lowerCAmelCase__ ) -> Optional[Any]:
'''simple docstring'''
requires_backends(self , ['''torch'''] )
@classmethod
def _SCREAMING_SNAKE_CASE ( cls , *lowerCAmelCase__ , **lowerCAmelCase__ ) -> List[str]:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
@classmethod
def _SCREAMING_SNAKE_CASE ( cls , *lowerCAmelCase__ , **lowerCAmelCase__ ) -> Optional[Any]:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
class _UpperCamelCase ( metaclass=_UpperCAmelCase ):
"""simple docstring"""
__a : Optional[int] = ['''torch''']
def __init__( self , *lowerCAmelCase__ , **lowerCAmelCase__ ) -> str:
'''simple docstring'''
requires_backends(self , ['''torch'''] )
@classmethod
def _SCREAMING_SNAKE_CASE ( cls , *lowerCAmelCase__ , **lowerCAmelCase__ ) -> Tuple:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
@classmethod
def _SCREAMING_SNAKE_CASE ( cls , *lowerCAmelCase__ , **lowerCAmelCase__ ) -> int:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
class _UpperCamelCase ( metaclass=_UpperCAmelCase ):
"""simple docstring"""
__a : int = ['''torch''']
def __init__( self , *lowerCAmelCase__ , **lowerCAmelCase__ ) -> Union[str, Any]:
'''simple docstring'''
requires_backends(self , ['''torch'''] )
@classmethod
def _SCREAMING_SNAKE_CASE ( cls , *lowerCAmelCase__ , **lowerCAmelCase__ ) -> Any:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
@classmethod
def _SCREAMING_SNAKE_CASE ( cls , *lowerCAmelCase__ , **lowerCAmelCase__ ) -> Any:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
class _UpperCamelCase ( metaclass=_UpperCAmelCase ):
"""simple docstring"""
__a : int = ['''torch''']
def __init__( self , *lowerCAmelCase__ , **lowerCAmelCase__ ) -> Union[str, Any]:
'''simple docstring'''
requires_backends(self , ['''torch'''] )
@classmethod
def _SCREAMING_SNAKE_CASE ( cls , *lowerCAmelCase__ , **lowerCAmelCase__ ) -> Dict:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
@classmethod
def _SCREAMING_SNAKE_CASE ( cls , *lowerCAmelCase__ , **lowerCAmelCase__ ) -> List[str]:
'''simple docstring'''
        requires_backends(cls , ['''torch'''] )
| 210 |
import inspect
import unittest
import warnings
from math import ceil, floor
from transformers import LevitConfig
from transformers.file_utils import cached_property, is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
MODEL_MAPPING,
LevitForImageClassification,
LevitForImageClassificationWithTeacher,
LevitModel,
)
from transformers.models.levit.modeling_levit import LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import LevitImageProcessor
class _UpperCamelCase ( _UpperCAmelCase ):
"""simple docstring"""
def _SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]:
'''simple docstring'''
__lowercase = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(lowerCAmelCase__ , '''hidden_sizes''' ) )
self.parent.assertTrue(hasattr(lowerCAmelCase__ , '''num_attention_heads''' ) )
class _UpperCamelCase :
"""simple docstring"""
def __init__( self , lowerCAmelCase__ , lowerCAmelCase__=13 , lowerCAmelCase__=64 , lowerCAmelCase__=3 , lowerCAmelCase__=3 , lowerCAmelCase__=2 , lowerCAmelCase__=1 , lowerCAmelCase__=16 , lowerCAmelCase__=[1_28, 2_56, 3_84] , lowerCAmelCase__=[4, 6, 8] , lowerCAmelCase__=[2, 3, 4] , lowerCAmelCase__=[16, 16, 16] , lowerCAmelCase__=0 , lowerCAmelCase__=[2, 2, 2] , lowerCAmelCase__=[2, 2, 2] , lowerCAmelCase__=0.02 , lowerCAmelCase__=True , lowerCAmelCase__=True , lowerCAmelCase__=2 , ) -> Tuple:
'''simple docstring'''
__lowercase = parent
__lowercase = batch_size
__lowercase = image_size
__lowercase = num_channels
__lowercase = kernel_size
__lowercase = stride
__lowercase = padding
__lowercase = hidden_sizes
__lowercase = num_attention_heads
__lowercase = depths
__lowercase = key_dim
__lowercase = drop_path_rate
__lowercase = patch_size
__lowercase = attention_ratio
__lowercase = mlp_ratio
__lowercase = initializer_range
__lowercase = [
['''Subsample''', key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2],
['''Subsample''', key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2],
]
__lowercase = is_training
__lowercase = use_labels
__lowercase = num_labels
__lowercase = initializer_range
def _SCREAMING_SNAKE_CASE ( self ) -> Optional[int]:
'''simple docstring'''
__lowercase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__lowercase = None
if self.use_labels:
__lowercase = ids_tensor([self.batch_size] , self.num_labels )
__lowercase = self.get_config()
return config, pixel_values, labels
def _SCREAMING_SNAKE_CASE ( self ) -> int:
'''simple docstring'''
return LevitConfig(
image_size=self.image_size , num_channels=self.num_channels , kernel_size=self.kernel_size , stride=self.stride , padding=self.padding , patch_size=self.patch_size , hidden_sizes=self.hidden_sizes , num_attention_heads=self.num_attention_heads , depths=self.depths , key_dim=self.key_dim , drop_path_rate=self.drop_path_rate , mlp_ratio=self.mlp_ratio , attention_ratio=self.attention_ratio , initializer_range=self.initializer_range , down_ops=self.down_ops , )
def _SCREAMING_SNAKE_CASE ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> Tuple:
'''simple docstring'''
__lowercase = LevitModel(config=lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
__lowercase = model(lowerCAmelCase__ )
__lowercase = (self.image_size, self.image_size)
__lowercase , __lowercase = image_size[0], image_size[1]
for _ in range(4 ):
__lowercase = floor(((height + 2 * self.padding - self.kernel_size) / self.stride) + 1 )
__lowercase = floor(((width + 2 * self.padding - self.kernel_size) / self.stride) + 1 )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, ceil(height / 4 ) * ceil(width / 4 ), self.hidden_sizes[-1]) , )
def _SCREAMING_SNAKE_CASE ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> str:
'''simple docstring'''
__lowercase = self.num_labels
__lowercase = LevitForImageClassification(lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
__lowercase = model(lowerCAmelCase__ , labels=lowerCAmelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]:
'''simple docstring'''
__lowercase = self.prepare_config_and_inputs()
__lowercase , __lowercase , __lowercase = config_and_inputs
__lowercase = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class _UpperCamelCase ( _UpperCAmelCase ,_UpperCAmelCase ,unittest.TestCase ):
"""simple docstring"""
__a : int = (
(LevitModel, LevitForImageClassification, LevitForImageClassificationWithTeacher)
if is_torch_available()
else ()
)
__a : List[str] = (
{
'''feature-extraction''': LevitModel,
'''image-classification''': (LevitForImageClassification, LevitForImageClassificationWithTeacher),
}
if is_torch_available()
else {}
)
__a : int = False
__a : Dict = False
__a : Optional[Any] = False
__a : Optional[int] = False
__a : Dict = False
def _SCREAMING_SNAKE_CASE ( self ) -> List[Any]:
'''simple docstring'''
__lowercase = LevitModelTester(self )
__lowercase = ConfigTester(self , config_class=lowerCAmelCase__ , has_text_modality=lowerCAmelCase__ , hidden_size=37 )
def _SCREAMING_SNAKE_CASE ( self ) -> List[Any]:
'''simple docstring'''
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def _SCREAMING_SNAKE_CASE ( self ) -> List[str]:
'''simple docstring'''
return
@unittest.skip(reason='''Levit does not use inputs_embeds''' )
def _SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]:
'''simple docstring'''
pass
@unittest.skip(reason='''Levit does not support input and output embeddings''' )
def _SCREAMING_SNAKE_CASE ( self ) -> List[str]:
'''simple docstring'''
pass
@unittest.skip(reason='''Levit does not output attentions''' )
def _SCREAMING_SNAKE_CASE ( self ) -> Tuple:
'''simple docstring'''
pass
def _SCREAMING_SNAKE_CASE ( self ) -> str:
'''simple docstring'''
__lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowercase = model_class(lowerCAmelCase__ )
__lowercase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__lowercase = [*signature.parameters.keys()]
__lowercase = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , lowerCAmelCase__ )
def _SCREAMING_SNAKE_CASE ( self ) -> List[str]:
'''simple docstring'''
def check_hidden_states_output(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ):
__lowercase = model_class(lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
with torch.no_grad():
__lowercase = model(**self._prepare_for_class(lowerCAmelCase__ , lowerCAmelCase__ ) )
__lowercase = outputs.hidden_states
__lowercase = len(self.model_tester.depths ) + 1
self.assertEqual(len(lowerCAmelCase__ ) , lowerCAmelCase__ )
__lowercase = (self.model_tester.image_size, self.model_tester.image_size)
__lowercase , __lowercase = image_size[0], image_size[1]
for _ in range(4 ):
__lowercase = floor(
(
(height + 2 * self.model_tester.padding - self.model_tester.kernel_size)
/ self.model_tester.stride
)
+ 1 )
__lowercase = floor(
(
(width + 2 * self.model_tester.padding - self.model_tester.kernel_size)
/ self.model_tester.stride
)
+ 1 )
# verify the first hidden states (first block)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [
height * width,
self.model_tester.hidden_sizes[0],
] , )
__lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowercase = True
check_hidden_states_output(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
            # check that output_hidden_states also works using config
del inputs_dict["output_hidden_states"]
__lowercase = True
check_hidden_states_output(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def _SCREAMING_SNAKE_CASE ( self ) -> int:
'''simple docstring'''
pass
def _SCREAMING_SNAKE_CASE ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__=False ) -> str:
'''simple docstring'''
__lowercase = super()._prepare_for_class(lowerCAmelCase__ , lowerCAmelCase__ , return_labels=lowerCAmelCase__ )
if return_labels:
if model_class.__name__ == "LevitForImageClassificationWithTeacher":
del inputs_dict["labels"]
return inputs_dict
def _SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]:
'''simple docstring'''
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCAmelCase__ )
def _SCREAMING_SNAKE_CASE ( self ) -> Dict:
'''simple docstring'''
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowerCAmelCase__ )
def _SCREAMING_SNAKE_CASE ( self ) -> List[str]:
'''simple docstring'''
if not self.model_tester.is_training:
return
__lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_common()
__lowercase = True
for model_class in self.all_model_classes:
# LevitForImageClassificationWithTeacher supports inference-only
if (
model_class in get_values(lowerCAmelCase__ )
or model_class.__name__ == "LevitForImageClassificationWithTeacher"
):
continue
__lowercase = model_class(lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.train()
__lowercase = self._prepare_for_class(lowerCAmelCase__ , lowerCAmelCase__ , return_labels=lowerCAmelCase__ )
__lowercase = model(**lowerCAmelCase__ ).loss
loss.backward()
def _SCREAMING_SNAKE_CASE ( self ) -> Any:
'''simple docstring'''
__lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_common()
if not self.model_tester.is_training:
return
__lowercase = False
__lowercase = True
for model_class in self.all_model_classes:
if model_class in get_values(lowerCAmelCase__ ) or not model_class.supports_gradient_checkpointing:
continue
# LevitForImageClassificationWithTeacher supports inference-only
if model_class.__name__ == "LevitForImageClassificationWithTeacher":
continue
__lowercase = model_class(lowerCAmelCase__ )
model.gradient_checkpointing_enable()
model.to(lowerCAmelCase__ )
model.train()
__lowercase = self._prepare_for_class(lowerCAmelCase__ , lowerCAmelCase__ , return_labels=lowerCAmelCase__ )
__lowercase = model(**lowerCAmelCase__ ).loss
loss.backward()
def _SCREAMING_SNAKE_CASE ( self ) -> str:
'''simple docstring'''
__lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_common()
__lowercase = [
{'''title''': '''multi_label_classification''', '''num_labels''': 2, '''dtype''': torch.float},
{'''title''': '''single_label_classification''', '''num_labels''': 1, '''dtype''': torch.long},
{'''title''': '''regression''', '''num_labels''': 1, '''dtype''': torch.float},
]
for model_class in self.all_model_classes:
if (
model_class
not in [
*get_values(lowerCAmelCase__ ),
]
or model_class.__name__ == "LevitForImageClassificationWithTeacher"
):
continue
for problem_type in problem_types:
with self.subTest(msg=F"Testing {model_class} with {problem_type['title']}" ):
__lowercase = problem_type['''title''']
__lowercase = problem_type['''num_labels''']
__lowercase = model_class(lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.train()
__lowercase = self._prepare_for_class(lowerCAmelCase__ , lowerCAmelCase__ , return_labels=lowerCAmelCase__ )
if problem_type["num_labels"] > 1:
__lowercase = inputs['''labels'''].unsqueeze(1 ).repeat(1 , problem_type['''num_labels'''] )
__lowercase = inputs['''labels'''].to(problem_type['''dtype'''] )
                    # This tests that we do not trigger the warning from PyTorch "Using a target size that is different
                    # to the input size. This will likely lead to incorrect results due to broadcasting. Please ensure
                    # they have the same size." which is a symptom that something is wrong for the regression problem.
# See https://github.com/huggingface/transformers/issues/11780
with warnings.catch_warnings(record=lowerCAmelCase__ ) as warning_list:
__lowercase = model(**lowerCAmelCase__ ).loss
for w in warning_list:
if "Using a target size that is different to the input size" in str(w.message ):
raise ValueError(
F"Something is going wrong in the regression problem: intercepted {w.message}" )
loss.backward()
@slow
def _SCREAMING_SNAKE_CASE ( self ) -> Dict:
'''simple docstring'''
for model_name in LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowercase = LevitModel.from_pretrained(lowerCAmelCase__ )
self.assertIsNotNone(lowerCAmelCase__ )
def UpperCAmelCase ( ):
"""simple docstring"""
__lowercase = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_torch
@require_vision
class _UpperCamelCase ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def _SCREAMING_SNAKE_CASE ( self ) -> str:
'''simple docstring'''
return LevitImageProcessor.from_pretrained(LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
@slow
def _SCREAMING_SNAKE_CASE ( self ) -> Dict:
'''simple docstring'''
__lowercase = LevitForImageClassificationWithTeacher.from_pretrained(LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(
lowerCAmelCase__ )
__lowercase = self.default_image_processor
__lowercase = prepare_img()
__lowercase = image_processor(images=lowerCAmelCase__ , return_tensors='''pt''' ).to(lowerCAmelCase__ )
# forward pass
with torch.no_grad():
__lowercase = model(**lowerCAmelCase__ )
# verify the logits
__lowercase = torch.Size((1, 10_00) )
self.assertEqual(outputs.logits.shape , lowerCAmelCase__ )
__lowercase = torch.tensor([1.0448, -0.3745, -1.8317] ).to(lowerCAmelCase__ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowerCAmelCase__ , atol=1E-4 ) ) | 210 | 1 |
'''simple docstring'''
import numpy as np
# Importing the Keras libraries and packages
import tensorflow as tf
from tensorflow.keras import layers, models
if __name__ == "__main__":
# Initialising the CNN
    # (Sequential - building the model layer by layer)
__lowerCAmelCase : Union[str, Any] =models.Sequential()
# Step 1 - Convolution
    # Here 64, 64 is the height & width of the dataset images and 3 is the number of RGB channels
# (3,3) is the kernel size (filter matrix)
classifier.add(
layers.ConvaD(32, (3, 3), input_shape=(64, 64, 3), activation="relu")
)
# Step 2 - Pooling
classifier.add(layers.MaxPoolingaD(pool_size=(2, 2)))
# Adding a second convolutional layer
classifier.add(layers.ConvaD(32, (3, 3), activation="relu"))
classifier.add(layers.MaxPoolingaD(pool_size=(2, 2)))
# Step 3 - Flattening
classifier.add(layers.Flatten())
# Step 4 - Full connection
classifier.add(layers.Dense(units=128, activation="relu"))
classifier.add(layers.Dense(units=1, activation="sigmoid"))
# Compiling the CNN
classifier.compile(
optimizer="adam", loss="binary_crossentropy", metrics=["accuracy"]
)
# Part 2 - Fitting the CNN to the images
# Load Trained model weights
# from keras.models import load_model
# regressor=load_model('cnn.h5')
__lowerCAmelCase : Optional[Any] =tf.keras.preprocessing.image.ImageDataGenerator(
rescale=1.0 / 255, shear_range=0.2, zoom_range=0.2, horizontal_flip=True
)
__lowerCAmelCase : Tuple =tf.keras.preprocessing.image.ImageDataGenerator(rescale=1.0 / 255)
__lowerCAmelCase : Any =train_datagen.flow_from_directory(
"dataset/training_set", target_size=(64, 64), batch_size=32, class_mode="binary"
)
__lowerCAmelCase : Dict =test_datagen.flow_from_directory(
"dataset/test_set", target_size=(64, 64), batch_size=32, class_mode="binary"
)
classifier.fit_generator(
training_set, steps_per_epoch=5, epochs=30, validation_data=test_set
)
classifier.save("cnn.h5")
# Part 3 - Making new predictions
__lowerCAmelCase : Dict =tf.keras.preprocessing.image.load_img(
"dataset/single_prediction/image.png", target_size=(64, 64)
)
__lowerCAmelCase : str =tf.keras.preprocessing.image.img_to_array(test_image)
__lowerCAmelCase : List[Any] =np.expand_dims(test_image, axis=0)
__lowerCAmelCase : int =classifier.predict(test_image)
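    # Note: predict() returns a sigmoid probability in [0, 1]; thresholding (e.g. result[0][0] > 0.5)
    # would be more robust than comparing to exactly 0 or 1.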
# training_set.class_indices
if result[0][0] == 0:
__lowerCAmelCase : Any ="Normal"
if result[0][0] == 1:
__lowerCAmelCase : Optional[int] ="Abnormality detected"
| 123 |
'''simple docstring'''
def UpperCamelCase ( _lowerCamelCase : int = 1_00_00_00 ):
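    # Sieve of Eratosthenes restricted to odd candidates; 2 is added separately.
    # This matches Project Euler 72: counting reduced proper fractions via the totient sum.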
A__ = set(range(3 , _lowerCamelCase , 2 ) )
primes.add(2 )
for p in range(3 , _lowerCamelCase , 2 ):
if p not in primes:
continue
primes.difference_update(set(range(p * p , _lowerCamelCase , _lowerCamelCase ) ) )
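    # Euler's product formula: phi(n) = n * prod(1 - 1/p) over the distinct primes p dividing n.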
A__ = [float(_lowerCamelCase ) for n in range(limit + 1 )]
for p in primes:
for n in range(_lowerCamelCase , limit + 1 , _lowerCamelCase ):
phi[n] *= 1 - 1 / p
return int(sum(phi[2:] ) )
if __name__ == "__main__":
print(f"""{solution() = }""")
| 123 | 1 |
'''simple docstring'''
import math
import sys
def snake_case_ ( __SCREAMING_SNAKE_CASE : str ):
"""simple docstring"""
lowercase_ : str = ''''''
try:
with open(__SCREAMING_SNAKE_CASE , '''rb''' ) as binary_file:
lowercase_ : str = binary_file.read()
for dat in data:
lowercase_ : Union[str, Any] = F'''{dat:08b}'''
result += curr_byte
return result
except OSError:
print('''File not accessible''' )
sys.exit()
def snake_case_ ( __SCREAMING_SNAKE_CASE : str ):
"""simple docstring"""
lowercase_ : List[Any] = {'''0''': '''0''', '''1''': '''1'''}
lowercase_ , lowercase_ : int = '''''', ''''''
lowercase_ : Optional[int] = len(__SCREAMING_SNAKE_CASE )
for i in range(len(__SCREAMING_SNAKE_CASE ) ):
curr_string += data_bits[i]
if curr_string not in lexicon:
continue
lowercase_ : str = lexicon[curr_string]
result += last_match_id
lowercase_ : Optional[Any] = last_match_id + '''0'''
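        # When the number of lexicon entries reaches a power of two, codes grow one bit longer;
        # the upstream algorithm rebuilds the lexicon with every existing key prefixed by "0".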
if math.loga(__SCREAMING_SNAKE_CASE ).is_integer():
lowercase_ : str = {}
for curr_key in list(__SCREAMING_SNAKE_CASE ):
lowercase_ : str = lexicon.pop(__SCREAMING_SNAKE_CASE )
lowercase_ : Union[str, Any] = new_lex
lowercase_ : Union[str, Any] = last_match_id + '''1'''
index += 1
lowercase_ : List[str] = ''''''
return result
def snake_case_ ( __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : str ):
"""simple docstring"""
lowercase_ : int = 8
try:
with open(__SCREAMING_SNAKE_CASE , '''wb''' ) as opened_file:
lowercase_ : Union[str, Any] = [
to_write[i : i + byte_length]
for i in range(0 , len(__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE )
]
if len(result_byte_array[-1] ) % byte_length == 0:
result_byte_array.append('''10000000''' )
else:
result_byte_array[-1] += "1" + "0" * (
byte_length - len(result_byte_array[-1] ) - 1
)
for elem in result_byte_array[:-1]:
opened_file.write(int(__SCREAMING_SNAKE_CASE , 2 ).to_bytes(1 , byteorder='''big''' ) )
except OSError:
print('''File not accessible''' )
sys.exit()
def snake_case_ ( __SCREAMING_SNAKE_CASE : str ):
"""simple docstring"""
lowercase_ : Tuple = 0
for letter in data_bits:
if letter == "1":
break
counter += 1
lowercase_ : int = data_bits[counter:]
lowercase_ : int = data_bits[counter + 1 :]
return data_bits
def snake_case_ ( __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : str ):
"""simple docstring"""
lowercase_ : Optional[int] = read_file_binary(__SCREAMING_SNAKE_CASE )
lowercase_ : Dict = remove_prefix(__SCREAMING_SNAKE_CASE )
lowercase_ : Any = decompress_data(__SCREAMING_SNAKE_CASE )
write_file_binary(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
compress(sys.argv[1], sys.argv[2])
| 93 |
'''simple docstring'''
import json
import os
import tempfile
from transformers.testing_utils import check_json_file_has_correct_format
class lowerCAmelCase__ :
lowerCAmelCase_ = None
def _snake_case ( self ):
"""simple docstring"""
lowercase_ : Tuple = self.feature_extraction_class(**self.feat_extract_dict )
lowercase_ : Any = json.loads(feat_extract.to_json_string() )
for key, value in self.feat_extract_dict.items():
self.assertEqual(obj[key] , __SCREAMING_SNAKE_CASE )
def _snake_case ( self ):
"""simple docstring"""
lowercase_ : str = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
lowercase_ : str = os.path.join(__SCREAMING_SNAKE_CASE , '''feat_extract.json''' )
feat_extract_first.to_json_file(__SCREAMING_SNAKE_CASE )
lowercase_ : str = self.feature_extraction_class.from_json_file(__SCREAMING_SNAKE_CASE )
self.assertEqual(feat_extract_second.to_dict() , feat_extract_first.to_dict() )
def _snake_case ( self ):
"""simple docstring"""
lowercase_ : Tuple = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
lowercase_ : Union[str, Any] = feat_extract_first.save_pretrained(__SCREAMING_SNAKE_CASE )[0]
check_json_file_has_correct_format(__SCREAMING_SNAKE_CASE )
lowercase_ : str = self.feature_extraction_class.from_pretrained(__SCREAMING_SNAKE_CASE )
self.assertEqual(feat_extract_second.to_dict() , feat_extract_first.to_dict() )
def _snake_case ( self ):
"""simple docstring"""
lowercase_ : Optional[Any] = self.feature_extraction_class()
self.assertIsNotNone(__SCREAMING_SNAKE_CASE )
| 93 | 1 |
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers import is_speech_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_speech_available():
from transformers import WhisperFeatureExtractor
if is_torch_available():
import torch
_lowerCamelCase = random.Random()
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : int , __UpperCamelCase : str=1.0 , __UpperCamelCase : List[Any]=None , __UpperCamelCase : List[str]=None ) -> List[str]:
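    # Build a shape[0] x shape[1] nested list of random floats in [0, scale),
    # defaulting to the module-level RNG when no generator is passed.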
if rng is None:
UpperCAmelCase_ = global_rng
UpperCAmelCase_ = []
for batch_idx in range(shape[0] ):
values.append([] )
for _ in range(shape[1] ):
values[-1].append(rng.random() * scale )
return values
@require_torch
@require_torchaudio
class a ( unittest.TestCase ):
'''simple docstring'''
def __init__( self : Dict , __snake_case : Any , __snake_case : List[Any]=7 , __snake_case : Tuple=4_00 , __snake_case : Any=20_00 , __snake_case : Dict=10 , __snake_case : str=1_60 , __snake_case : List[str]=8 , __snake_case : Dict=0.0 , __snake_case : int=40_00 , __snake_case : str=False , __snake_case : Any=True , ):
UpperCAmelCase_ = parent
UpperCAmelCase_ = batch_size
UpperCAmelCase_ = min_seq_length
UpperCAmelCase_ = max_seq_length
UpperCAmelCase_ = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
UpperCAmelCase_ = padding_value
UpperCAmelCase_ = sampling_rate
UpperCAmelCase_ = return_attention_mask
UpperCAmelCase_ = do_normalize
UpperCAmelCase_ = feature_size
UpperCAmelCase_ = chunk_length
UpperCAmelCase_ = hop_length
def lowerCamelCase_ ( self : Dict ):
return {
"feature_size": self.feature_size,
"hop_length": self.hop_length,
"chunk_length": self.chunk_length,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
def lowerCamelCase_ ( self : str , __snake_case : Optional[int]=False , __snake_case : Optional[int]=False ):
def _flatten(__snake_case : Any ):
return list(itertools.chain(*_a ) )
if equal_length:
UpperCAmelCase_ = [floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )]
else:
# make sure that inputs increase in size
UpperCAmelCase_ = [
floats_list((x, self.feature_size) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
UpperCAmelCase_ = [np.asarray(_a ) for x in speech_inputs]
return speech_inputs
@require_torch
@require_torchaudio
class a ( _A , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase : Dict = WhisperFeatureExtractor if is_speech_available() else None
def lowerCamelCase_ ( self : int ):
UpperCAmelCase_ = WhisperFeatureExtractionTester(self )
def lowerCamelCase_ ( self : Any ):
UpperCAmelCase_ = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
UpperCAmelCase_ = feat_extract_first.save_pretrained(_a )[0]
check_json_file_has_correct_format(_a )
UpperCAmelCase_ = self.feature_extraction_class.from_pretrained(_a )
UpperCAmelCase_ = feat_extract_first.to_dict()
UpperCAmelCase_ = feat_extract_second.to_dict()
UpperCAmelCase_ = feat_extract_first.mel_filters
UpperCAmelCase_ = feat_extract_second.mel_filters
self.assertTrue(np.allclose(_a , _a ) )
self.assertEqual(_a , _a )
def lowerCamelCase_ ( self : Tuple ):
UpperCAmelCase_ = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
UpperCAmelCase_ = os.path.join(_a , '''feat_extract.json''' )
feat_extract_first.to_json_file(_a )
UpperCAmelCase_ = self.feature_extraction_class.from_json_file(_a )
UpperCAmelCase_ = feat_extract_first.to_dict()
UpperCAmelCase_ = feat_extract_second.to_dict()
UpperCAmelCase_ = feat_extract_first.mel_filters
UpperCAmelCase_ = feat_extract_second.mel_filters
self.assertTrue(np.allclose(_a , _a ) )
self.assertEqual(_a , _a )
def lowerCamelCase_ ( self : List[str] ):
        # Tests that all calls wrap to encode_plus and batch_encode_plus
UpperCAmelCase_ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
UpperCAmelCase_ = [floats_list((1, x) )[0] for x in range(8_00 , 14_00 , 2_00 )]
UpperCAmelCase_ = [np.asarray(_a ) for speech_input in speech_inputs]
# Test feature size
UpperCAmelCase_ = feature_extractor(_a , padding='''max_length''' , return_tensors='''np''' ).input_features
self.assertTrue(input_features.ndim == 3 )
self.assertTrue(input_features.shape[-1] == feature_extractor.nb_max_frames )
self.assertTrue(input_features.shape[-2] == feature_extractor.feature_size )
# Test not batched input
UpperCAmelCase_ = feature_extractor(speech_inputs[0] , return_tensors='''np''' ).input_features
UpperCAmelCase_ = feature_extractor(np_speech_inputs[0] , return_tensors='''np''' ).input_features
self.assertTrue(np.allclose(_a , _a , atol=1E-3 ) )
# Test batched
UpperCAmelCase_ = feature_extractor(_a , return_tensors='''np''' ).input_features
UpperCAmelCase_ = feature_extractor(_a , return_tensors='''np''' ).input_features
for enc_seq_a, enc_seq_a in zip(_a , _a ):
self.assertTrue(np.allclose(_a , _a , atol=1E-3 ) )
# Test 2-D numpy arrays are batched.
UpperCAmelCase_ = [floats_list((1, x) )[0] for x in (8_00, 8_00, 8_00)]
UpperCAmelCase_ = np.asarray(_a )
UpperCAmelCase_ = feature_extractor(_a , return_tensors='''np''' ).input_features
UpperCAmelCase_ = feature_extractor(_a , return_tensors='''np''' ).input_features
for enc_seq_a, enc_seq_a in zip(_a , _a ):
self.assertTrue(np.allclose(_a , _a , atol=1E-3 ) )
# Test truncation required
UpperCAmelCase_ = [floats_list((1, x) )[0] for x in range(2_00 , (feature_extractor.n_samples + 5_00) , 2_00 )]
UpperCAmelCase_ = [np.asarray(_a ) for speech_input in speech_inputs]
UpperCAmelCase_ = [x[: feature_extractor.n_samples] for x in speech_inputs]
UpperCAmelCase_ = [np.asarray(_a ) for speech_input in speech_inputs_truncated]
UpperCAmelCase_ = feature_extractor(_a , return_tensors='''np''' ).input_features
UpperCAmelCase_ = feature_extractor(_a , return_tensors='''np''' ).input_features
for enc_seq_a, enc_seq_a in zip(_a , _a ):
self.assertTrue(np.allclose(_a , _a , atol=1E-3 ) )
def lowerCamelCase_ ( self : Any ):
import torch
UpperCAmelCase_ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
UpperCAmelCase_ = np.random.rand(1_00 , 32 ).astype(np.floataa )
UpperCAmelCase_ = np_speech_inputs.tolist()
for inputs in [py_speech_inputs, np_speech_inputs]:
UpperCAmelCase_ = feature_extractor.pad([{'''input_features''': inputs}] , return_tensors='''np''' )
self.assertTrue(np_processed.input_features.dtype == np.floataa )
UpperCAmelCase_ = feature_extractor.pad([{'''input_features''': inputs}] , return_tensors='''pt''' )
self.assertTrue(pt_processed.input_features.dtype == torch.floataa )
def lowerCamelCase_ ( self : List[str] , __snake_case : Any ):
UpperCAmelCase_ = load_dataset('''hf-internal-testing/librispeech_asr_dummy''' , '''clean''' , split='''validation''' )
# automatic decoding with librispeech
UpperCAmelCase_ = ds.sort('''id''' ).select(range(_a ) )[:num_samples]["audio"]
return [x["array"] for x in speech_samples]
def lowerCamelCase_ ( self : str ):
# fmt: off
UpperCAmelCase_ = torch.tensor(
[
0.1_193, -0.0_946, -0.1_098, -0.0_196, 0.0_225, -0.0_690, -0.1_736, 0.0_951,
0.0_971, -0.0_817, -0.0_702, 0.0_162, 0.0_260, 0.0_017, -0.0_192, -0.1_678,
0.0_709, -0.1_867, -0.0_655, -0.0_274, -0.0_234, -0.1_884, -0.0_516, -0.0_554,
-0.0_274, -0.1_425, -0.1_423, 0.0_837, 0.0_377, -0.0_854
] )
# fmt: on
UpperCAmelCase_ = self._load_datasamples(1 )
UpperCAmelCase_ = WhisperFeatureExtractor()
UpperCAmelCase_ = feature_extractor(_a , return_tensors='''pt''' ).input_features
self.assertEqual(input_features.shape , (1, 80, 30_00) )
self.assertTrue(torch.allclose(input_features[0, 0, :30] , _a , atol=1E-4 ) )
def lowerCamelCase_ ( self : Any ):
UpperCAmelCase_ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
UpperCAmelCase_ = self._load_datasamples(1 )[0]
UpperCAmelCase_ = ((audio - audio.min()) / (audio.max() - audio.min())) * 6_55_35 # Rescale to [0, 65535] to show issue
UpperCAmelCase_ = feat_extract.zero_mean_unit_var_norm([audio] , attention_mask=_a )[0]
self.assertTrue(np.all(np.mean(_a ) < 1E-3 ) )
self.assertTrue(np.all(np.abs(np.var(_a ) - 1 ) < 1E-3 ) )
| 369 |
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : float , __UpperCamelCase : float ) -> float:
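    # Newton-Laplace equation: c = sqrt(bulk_modulus / density);
    # e.g. water (K ~ 2.15e9 Pa, rho ~ 998 kg/m^3) gives c ~ 1468 m/s.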
if density <= 0:
raise ValueError('''Impossible fluid density''' )
if bulk_modulus <= 0:
raise ValueError('''Impossible bulk modulus''' )
return (bulk_modulus / density) ** 0.5
if __name__ == "__main__":
import doctest
doctest.testmod()
| 177 | 0 |
import os
import zipfile
import pytest
from datasets.utils.extract import (
BzipaExtractor,
Extractor,
GzipExtractor,
LzaExtractor,
SevenZipExtractor,
TarExtractor,
XzExtractor,
ZipExtractor,
ZstdExtractor,
)
from .utils import require_lza, require_pyazr, require_zstandard
@pytest.mark.parametrize(
'compression_format, is_archive' , [
('7z', True),
('bz2', False),
('gzip', False),
('lz4', False),
('tar', True),
('xz', False),
('zip', True),
('zstd', False),
] , )
def UpperCamelCase_( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , ) -> List[str]:
_lowercase : Dict = {
'7z': (seven_zip_file, SevenZipExtractor),
'bz2': (bza_file, BzipaExtractor),
'gzip': (gz_file, GzipExtractor),
'lz4': (lza_file, LzaExtractor),
'tar': (tar_file, TarExtractor),
'xz': (xz_file, XzExtractor),
'zip': (zip_file, ZipExtractor),
'zstd': (zstd_file, ZstdExtractor),
}
_lowercase , _lowercase : int = input_paths_and_base_extractors[compression_format]
if input_path is None:
_lowercase : Optional[int] = F'''for \'{compression_format}\' compression_format, '''
if compression_format == "7z":
reason += require_pyazr.kwargs["reason"]
elif compression_format == "lz4":
reason += require_lza.kwargs["reason"]
elif compression_format == "zstd":
reason += require_zstandard.kwargs["reason"]
pytest.skip(lowerCamelCase_ )
assert base_extractor.is_extractable(lowerCamelCase_ )
_lowercase : str = tmp_path / ('extracted' if is_archive else 'extracted.txt')
base_extractor.extract(lowerCamelCase_ , lowerCamelCase_ )
if is_archive:
assert output_path.is_dir()
for file_path in output_path.iterdir():
assert file_path.name == text_file.name
_lowercase : Union[str, Any] = file_path.read_text(encoding='utf-8' )
else:
_lowercase : List[Any] = output_path.read_text(encoding='utf-8' )
_lowercase : Tuple = text_file.read_text(encoding='utf-8' )
assert extracted_file_content == expected_file_content
@pytest.mark.parametrize(
'compression_format, is_archive' , [
('7z', True),
('bz2', False),
('gzip', False),
('lz4', False),
('tar', True),
('xz', False),
('zip', True),
('zstd', False),
] , )
def UpperCamelCase_( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , ) -> List[str]:
_lowercase : List[Any] = {
'7z': seven_zip_file,
'bz2': bza_file,
'gzip': gz_file,
'lz4': lza_file,
'tar': tar_file,
'xz': xz_file,
'zip': zip_file,
'zstd': zstd_file,
}
_lowercase : int = input_paths[compression_format]
if input_path is None:
_lowercase : Tuple = F'''for \'{compression_format}\' compression_format, '''
if compression_format == "7z":
reason += require_pyazr.kwargs["reason"]
elif compression_format == "lz4":
reason += require_lza.kwargs["reason"]
elif compression_format == "zstd":
reason += require_zstandard.kwargs["reason"]
pytest.skip(lowerCamelCase_ )
_lowercase : List[Any] = Extractor.infer_extractor_format(lowerCamelCase_ )
assert extractor_format is not None
_lowercase : int = tmp_path / ('extracted' if is_archive else 'extracted.txt')
Extractor.extract(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
if is_archive:
assert output_path.is_dir()
for file_path in output_path.iterdir():
assert file_path.name == text_file.name
_lowercase : Optional[int] = file_path.read_text(encoding='utf-8' )
else:
_lowercase : Any = output_path.read_text(encoding='utf-8' )
_lowercase : Any = text_file.read_text(encoding='utf-8' )
assert extracted_file_content == expected_file_content
@pytest.fixture
def UpperCamelCase_( lowerCamelCase_ , lowerCamelCase_ ) -> Optional[int]:
import tarfile
_lowercase : Union[str, Any] = tmp_path / 'data_dot_dot'
directory.mkdir()
_lowercase : Tuple = directory / 'tar_file_with_dot_dot.tar'
with tarfile.TarFile(lowerCamelCase_ , 'w' ) as f:
f.add(lowerCamelCase_ , arcname=os.path.join('..' , text_file.name ) )
return path
@pytest.fixture
def UpperCamelCase_( lowerCamelCase_ ) -> Optional[Any]:
import tarfile
_lowercase : int = tmp_path / 'data_sym_link'
directory.mkdir()
_lowercase : Union[str, Any] = directory / 'tar_file_with_sym_link.tar'
os.symlink('..' , directory / 'subdir' , target_is_directory=lowerCamelCase_ )
with tarfile.TarFile(lowerCamelCase_ , 'w' ) as f:
f.add(str(directory / 'subdir' ) , arcname='subdir' ) # str required by os.readlink on Windows and Python < 3.8
return path
@pytest.mark.parametrize(
'insecure_tar_file, error_log' , [('tar_file_with_dot_dot', 'illegal path'), ('tar_file_with_sym_link', 'Symlink')] , )
def UpperCamelCase_( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) -> int:
_lowercase : Dict = {
'tar_file_with_dot_dot': tar_file_with_dot_dot,
'tar_file_with_sym_link': tar_file_with_sym_link,
}
_lowercase : List[Any] = insecure_tar_files[insecure_tar_file]
_lowercase : List[str] = tmp_path / 'extracted'
TarExtractor.extract(lowerCamelCase_ , lowerCamelCase_ )
assert caplog.text
for record in caplog.records:
assert record.levelname == "ERROR"
assert error_log in record.msg
def UpperCamelCase_( lowerCamelCase_ ) -> Union[str, Any]:
# We should have less false positives than zipfile.is_zipfile
# We do that by checking only the magic number
_lowercase : List[Any] = tmpdir / 'not_a_zip_file'
# From: https://github.com/python/cpython/pull/5053
_lowercase : Union[str, Any] = (
B'\x89PNG\r\n\x1a\n\x00\x00\x00\rIHDR\x00\x00\x00\x01\x00\x00'
B'\x00\x02\x08\x06\x00\x00\x00\x99\x81\xb6\'\x00\x00\x00\x15I'
B'DATx\x01\x01\n\x00\xf5\xff\x00PK\x05\x06\x00PK\x06\x06\x07'
B'\xac\x01N\xc6|a\r\x00\x00\x00\x00IEND\xaeB`\x82'
)
with not_a_zip_file.open('wb' ) as f:
f.write(lowerCamelCase_ )
assert zipfile.is_zipfile(str(lowerCamelCase_ ) ) # is a false positive for `zipfile`
assert not ZipExtractor.is_extractable(lowerCamelCase_ ) # but we're right
| 21 |
"""simple docstring"""
import json
import os
from collections import Counter
import torch
import torchvision
import torchvision.transforms as transforms
from PIL import Image
from torch import nn
from torch.utils.data import Dataset
_SCREAMING_SNAKE_CASE : List[Any] = {1: (1, 1), 2: (2, 1), 3: (3, 1), 4: (2, 2), 5: (5, 1), 6: (3, 2), 7: (7, 1), 8: (4, 2), 9: (3, 3)}
class a ( nn.Module ):
def __init__( self : int , __SCREAMING_SNAKE_CASE : Tuple ) -> List[Any]:
super().__init__()
lowerCamelCase_ = torchvision.models.resnetaaa(pretrained=__SCREAMING_SNAKE_CASE )
lowerCamelCase_ = list(model.children() )[:-2]
lowerCamelCase_ = nn.Sequential(*__SCREAMING_SNAKE_CASE )
lowerCamelCase_ = nn.AdaptiveAvgPoolad(POOLING_BREAKDOWN[args.num_image_embeds] )
def UpperCamelCase ( self : List[Any] , __SCREAMING_SNAKE_CASE : Any ) -> Any:
# Bx3x224x224 -> Bx2048x7x7 -> Bx2048xN -> BxNx2048
lowerCamelCase_ = self.pool(self.model(__SCREAMING_SNAKE_CASE ) )
lowerCamelCase_ = torch.flatten(__SCREAMING_SNAKE_CASE , start_dim=2 )
lowerCamelCase_ = out.transpose(1 , 2 ).contiguous()
return out # BxNx2048
class a ( __snake_case ):
def __init__( self : Dict , __SCREAMING_SNAKE_CASE : Dict , __SCREAMING_SNAKE_CASE : List[str] , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : List[str] , __SCREAMING_SNAKE_CASE : List[Any] ) -> Optional[Any]:
lowerCamelCase_ = [json.loads(__SCREAMING_SNAKE_CASE ) for l in open(__SCREAMING_SNAKE_CASE )]
lowerCamelCase_ = os.path.dirname(__SCREAMING_SNAKE_CASE )
lowerCamelCase_ = tokenizer
lowerCamelCase_ = labels
lowerCamelCase_ = len(__SCREAMING_SNAKE_CASE )
lowerCamelCase_ = max_seq_length
lowerCamelCase_ = transforms
def __len__( self : Any ) -> Any:
return len(self.data )
def __getitem__( self : Optional[Any] , __SCREAMING_SNAKE_CASE : List[str] ) -> List[str]:
lowerCamelCase_ = torch.LongTensor(self.tokenizer.encode(self.data[index]['text'] , add_special_tokens=__SCREAMING_SNAKE_CASE ) )
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = sentence[0], sentence[1:-1], sentence[-1]
lowerCamelCase_ = sentence[: self.max_seq_length]
lowerCamelCase_ = torch.zeros(self.n_classes )
lowerCamelCase_ = 1
lowerCamelCase_ = Image.open(os.path.join(self.data_dir , self.data[index]['img'] ) ).convert('RGB' )
lowerCamelCase_ = self.transforms(__SCREAMING_SNAKE_CASE )
return {
"image_start_token": start_token,
"image_end_token": end_token,
"sentence": sentence,
"image": image,
"label": label,
}
def UpperCamelCase ( self : Dict ) -> Dict:
lowerCamelCase_ = Counter()
for row in self.data:
label_freqs.update(row['label'] )
return label_freqs
def lowerCamelCase__ ( _lowerCamelCase : Union[str, Any] ) -> str:
lowerCamelCase_ = [len(row['sentence'] ) for row in batch]
lowerCamelCase_ , lowerCamelCase_ = len(_lowerCamelCase ), max(_lowerCamelCase )
lowerCamelCase_ = torch.zeros(_lowerCamelCase , _lowerCamelCase , dtype=torch.long )
lowerCamelCase_ = torch.zeros(_lowerCamelCase , _lowerCamelCase , dtype=torch.long )
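    # Pad every sentence to the batch maximum; the loop below copies tokens and
    # sets the attention mask to 1 for real (non-padding) positions.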
for i_batch, (input_row, length) in enumerate(zip(_lowerCamelCase , _lowerCamelCase ) ):
lowerCamelCase_ = input_row['sentence']
lowerCamelCase_ = 1
lowerCamelCase_ = torch.stack([row['image'] for row in batch] )
lowerCamelCase_ = torch.stack([row['label'] for row in batch] )
lowerCamelCase_ = torch.stack([row['image_start_token'] for row in batch] )
lowerCamelCase_ = torch.stack([row['image_end_token'] for row in batch] )
return text_tensor, mask_tensor, img_tensor, img_start_token, img_end_token, tgt_tensor
def lowerCamelCase__ ( ) -> List[str]:
return [
"Crime",
"Drama",
"Thriller",
"Action",
"Comedy",
"Romance",
"Documentary",
"Short",
"Mystery",
"History",
"Family",
"Adventure",
"Fantasy",
"Sci-Fi",
"Western",
"Horror",
"Sport",
"War",
"Music",
"Musical",
"Animation",
"Biography",
"Film-Noir",
]
def lowerCamelCase__ ( ) -> Union[str, Any]:
return transforms.Compose(
[
transforms.Resize(256 ),
transforms.CenterCrop(224 ),
transforms.ToTensor(),
transforms.Normalize(
mean=[0.46_77_70_44, 0.44_53_14_29, 0.40_66_10_17] , std=[0.12_22_19_94, 0.12_14_58_35, 0.14_38_04_69] , ),
] )
| 183 | 0 |
from ...utils import is_note_seq_available, is_transformers_available, is_torch_available
from ...utils import OptionalDependencyNotAvailable
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .notes_encoder import SpectrogramNotesEncoder
from .continous_encoder import SpectrogramContEncoder
from .pipeline_spectrogram_diffusion import (
SpectrogramContEncoder,
SpectrogramDiffusionPipeline,
TaFilmDecoder,
)
try:
if not (is_transformers_available() and is_torch_available() and is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_transformers_and_torch_and_note_seq_objects import * # noqa F403
else:
from .midi_utils import MidiProcessor
| 238 |
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_herbert import HerbertTokenizer
snake_case_ = logging.get_logger(__name__)
snake_case_ = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_file': 'tokenizer.json'}
snake_case_ = {
'vocab_file': {
'allegro/herbert-base-cased': 'https://huggingface.co/allegro/herbert-base-cased/resolve/main/vocab.json'
},
'merges_file': {
'allegro/herbert-base-cased': 'https://huggingface.co/allegro/herbert-base-cased/resolve/main/merges.txt'
},
}
snake_case_ = {'allegro/herbert-base-cased': 514}
snake_case_ = {}
class SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase ):
A_ : Dict = VOCAB_FILES_NAMES
A_ : Optional[Any] = PRETRAINED_VOCAB_FILES_MAP
A_ : Optional[int] = PRETRAINED_INIT_CONFIGURATION
A_ : int = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
A_ : Any = HerbertTokenizer
def __init__(self : Dict , a__ : Tuple=None , a__ : Optional[int]=None , a__ : List[str]=None , a__ : Optional[int]="<s>" , a__ : Optional[Any]="<unk>" , a__ : Any="<pad>" , a__ : List[Any]="<mask>" , a__ : Any="</s>" , **a__ : Tuple , ):
"""simple docstring"""
super().__init__(
a__ , a__ , tokenizer_file=a__ , cls_token=a__ , unk_token=a__ , pad_token=a__ , mask_token=a__ , sep_token=a__ , **a__ , )
def a (self : List[str] , a__ : List[int] , a__ : Optional[List[int]] = None ):
"""simple docstring"""
__snake_case = [self.cls_token_id]
__snake_case = [self.sep_token_id]
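        # Single sequence: <s> A </s>; pair of sequences: <s> A </s> B </s>.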
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def a (self : List[str] , a__ : List[int] , a__ : Optional[List[int]] = None , a__ : bool = False ):
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=a__ , token_ids_a=a__ , already_has_special_tokens=a__ )
if token_ids_a is None:
return [1] + ([0] * len(a__ )) + [1]
return [1] + ([0] * len(a__ )) + [1] + ([0] * len(a__ )) + [1]
def a (self : Optional[int] , a__ : List[int] , a__ : Optional[List[int]] = None ):
"""simple docstring"""
__snake_case = [self.sep_token_id]
__snake_case = [self.cls_token_id]
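        # All-zero segment ids for the first sequence (including special tokens); ones for the second sequence.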
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def a (self : int , a__ : str , a__ : Optional[str] = None ):
"""simple docstring"""
__snake_case = self._tokenizer.model.save(a__ , name=a__ )
return tuple(a__ )
| 238 | 1 |
'''simple docstring'''
from math import loga
def lowerCamelCase ( UpperCAmelCase__ : int ) -> int:
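    # n & -n isolates the lowest set bit; log2 of that power of two gives its zero-based index.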
    if not isinstance(UpperCAmelCase__ , int ):
        raise TypeError("""Input value must be an 'int' type""" )
    elif UpperCAmelCase__ < 0:
        raise ValueError("""Input value must be a non-negative integer""" )
    return 0 if (UpperCAmelCase__ == 0) else int(loga(UpperCAmelCase__ & -UpperCAmelCase__ ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 239 | '''simple docstring'''
from itertools import product
def lowerCamelCase ( UpperCAmelCase__ : int , UpperCAmelCase__ : int ) -> list[int]:
lowercase_ : List[Any] = sides_number
lowercase_ : Dict = max_face_number * dice_number
lowercase_ : List[str] = [0] * (max_total + 1)
lowercase_ : Union[str, Any] = 1
lowercase_ : Dict = range(UpperCAmelCase__ , max_face_number + 1 )
for dice_numbers in product(UpperCAmelCase__ , repeat=UpperCAmelCase__ ):
lowercase_ : Any = sum(UpperCAmelCase__ )
totals_frequencies[total] += 1
return totals_frequencies
def lowerCamelCase ( ) -> float:
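    # Project Euler 205: Peter rolls nine 4-sided dice, Colin rolls six 6-sided dice;
    # compute P(Peter's total > Colin's total) by convolving the two frequency distributions.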
lowercase_ : Optional[Any] = total_frequency_distribution(
sides_number=4 , dice_number=9 )
lowercase_ : List[str] = total_frequency_distribution(
sides_number=6 , dice_number=6 )
lowercase_ : Union[str, Any] = 0
lowercase_ : Tuple = 9
lowercase_ : Optional[int] = 4 * 9
lowercase_ : List[Any] = 6
for peter_total in range(UpperCAmelCase__ , max_peter_total + 1 ):
peter_wins_count += peter_totals_frequencies[peter_total] * sum(
colin_totals_frequencies[min_colin_total:peter_total] )
lowercase_ : str = (4**9) * (6**6)
lowercase_ : List[Any] = peter_wins_count / total_games_number
lowercase_ : Dict = round(UpperCAmelCase__ , ndigits=7 )
return rounded_peter_win_probability
if __name__ == "__main__":
print(f"""{solution() = }""")
| 239 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__A = {
"""configuration_pegasus_x""": ["""PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP""", """PegasusXConfig"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A = [
"""PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""PegasusXForConditionalGeneration""",
"""PegasusXModel""",
"""PegasusXPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_pegasus_x import PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP, PegasusXConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_pegasus_x import (
PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST,
PegasusXForConditionalGeneration,
PegasusXModel,
PegasusXPreTrainedModel,
)
else:
import sys
__A = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 360 |
"""simple docstring"""
import inspect
import unittest
from transformers import RegNetConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from transformers.utils import cached_property, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.models.regnet.modeling_flax_regnet import FlaxRegNetForImageClassification, FlaxRegNetModel
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def __init__( self , __UpperCAmelCase , __UpperCAmelCase=3 , __UpperCAmelCase=3_2 , __UpperCAmelCase=3 , __UpperCAmelCase=1_0 , __UpperCAmelCase=[1_0, 2_0, 3_0, 4_0] , __UpperCAmelCase=[1, 1, 2, 1] , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase="relu" , __UpperCAmelCase=3 , __UpperCAmelCase=None , ):
'''simple docstring'''
lowerCAmelCase__ :Optional[Any] = parent
lowerCAmelCase__ :Dict = batch_size
lowerCAmelCase__ :Optional[int] = image_size
lowerCAmelCase__ :Any = num_channels
lowerCAmelCase__ :Union[str, Any] = embeddings_size
lowerCAmelCase__ :Optional[int] = hidden_sizes
lowerCAmelCase__ :Optional[int] = depths
lowerCAmelCase__ :Tuple = is_training
lowerCAmelCase__ :Tuple = use_labels
lowerCAmelCase__ :str = hidden_act
lowerCAmelCase__ :List[Any] = num_labels
lowerCAmelCase__ :Union[str, Any] = scope
lowerCAmelCase__ :Any = len(__UpperCAmelCase )
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ :Dict = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowerCAmelCase__ :Any = self.get_config()
return config, pixel_values
def snake_case ( self ):
'''simple docstring'''
return RegNetConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , image_size=self.image_size , )
def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
lowerCAmelCase__ :int = FlaxRegNetModel(config=__UpperCAmelCase )
lowerCAmelCase__ :List[str] = model(__UpperCAmelCase )
# Output shape (b, c, h, w)
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 3_2, self.image_size // 3_2) , )
def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
lowerCAmelCase__ :Optional[Any] = self.num_labels
lowerCAmelCase__ :int = FlaxRegNetForImageClassification(config=__UpperCAmelCase )
lowerCAmelCase__ :Any = model(__UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ :Optional[Any] = self.prepare_config_and_inputs()
lowerCAmelCase__ , lowerCAmelCase__ :List[Any] = config_and_inputs
lowerCAmelCase__ :Optional[Any] = {'pixel_values': pixel_values}
return config, inputs_dict
@require_flax
class _lowerCAmelCase ( a , unittest.TestCase ):
"""simple docstring"""
__magic_name__ :List[str] = (FlaxRegNetModel, FlaxRegNetForImageClassification) if is_flax_available() else ()
__magic_name__ :Any = False
__magic_name__ :int = False
__magic_name__ :Any = False
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ :str = FlaxRegNetModelTester(self )
lowerCAmelCase__ :Optional[int] = ConfigTester(self , config_class=__UpperCAmelCase , has_text_modality=__UpperCAmelCase )
def snake_case ( self ):
'''simple docstring'''
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def snake_case ( self ):
'''simple docstring'''
return
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ :List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__UpperCAmelCase )
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ :Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__UpperCAmelCase )
@unittest.skip(reason='RegNet does not use inputs_embeds' )
def snake_case ( self ):
'''simple docstring'''
pass
@unittest.skip(reason='RegNet does not support input and output embeddings' )
def snake_case ( self ):
'''simple docstring'''
pass
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ , lowerCAmelCase__ :Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCAmelCase__ :Optional[int] = model_class(__UpperCAmelCase )
lowerCAmelCase__ :List[Any] = inspect.signature(model.__call__ )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCAmelCase__ :str = [*signature.parameters.keys()]
lowerCAmelCase__ :Tuple = ['pixel_values']
self.assertListEqual(arg_names[:1] , __UpperCAmelCase )
def snake_case ( self ):
'''simple docstring'''
def check_hidden_states_output(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
lowerCAmelCase__ :List[Any] = model_class(__UpperCAmelCase )
lowerCAmelCase__ :Dict = model(**self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase ) )
lowerCAmelCase__ :List[str] = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
lowerCAmelCase__ :Optional[Any] = self.model_tester.num_stages
self.assertEqual(len(__UpperCAmelCase ) , expected_num_stages + 1 )
lowerCAmelCase__ , lowerCAmelCase__ :List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCAmelCase__ :str = True
check_hidden_states_output(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
            # check that output_hidden_states also works using config
del inputs_dict["output_hidden_states"]
lowerCAmelCase__ :Optional[Any] = True
check_hidden_states_output(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ , lowerCAmelCase__ :Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
lowerCAmelCase__ :Tuple = self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase )
lowerCAmelCase__ :Optional[int] = model_class(__UpperCAmelCase )
@jax.jit
def model_jitted(__UpperCAmelCase , **__UpperCAmelCase ):
return model(pixel_values=__UpperCAmelCase , **__UpperCAmelCase )
with self.subTest('JIT Enabled' ):
lowerCAmelCase__ :Dict = model_jitted(**__UpperCAmelCase ).to_tuple()
with self.subTest('JIT Disabled' ):
with jax.disable_jit():
lowerCAmelCase__ :Union[str, Any] = model_jitted(**__UpperCAmelCase ).to_tuple()
self.assertEqual(len(__UpperCAmelCase ) , len(__UpperCAmelCase ) )
for jitted_output, output in zip(__UpperCAmelCase , __UpperCAmelCase ):
self.assertEqual(jitted_output.shape , output.shape )
def __A () ->Optional[int]:
"""simple docstring"""
lowerCAmelCase__ :Optional[int] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_flax
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def snake_case ( self ):
'''simple docstring'''
return AutoImageProcessor.from_pretrained('facebook/regnet-y-040' ) if is_vision_available() else None
@slow
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ :Tuple = FlaxRegNetForImageClassification.from_pretrained('facebook/regnet-y-040' )
lowerCAmelCase__ :Any = self.default_image_processor
lowerCAmelCase__ :Dict = prepare_img()
lowerCAmelCase__ :Optional[int] = image_processor(images=__UpperCAmelCase , return_tensors='np' )
lowerCAmelCase__ :List[str] = model(**__UpperCAmelCase )
# verify the logits
lowerCAmelCase__ :Union[str, Any] = (1, 1_0_0_0)
self.assertEqual(outputs.logits.shape , __UpperCAmelCase )
lowerCAmelCase__ :Any = jnp.array([-0.41_80, -1.50_51, -3.48_36] )
self.assertTrue(jnp.allclose(outputs.logits[0, :3] , __UpperCAmelCase , atol=1E-4 ) )
| 254 | 0 |
def lowerCAmelCase_ ( __lowerCamelCase ):
if not isinstance(__lowerCamelCase , __lowerCamelCase ):
__snake_case : Optional[int] = F'Input value of [number={number}] must be an integer'
raise TypeError(__lowerCamelCase )
if number < 1:
__snake_case : List[Any] = F'Input value of [number={number}] must be > 0'
raise ValueError(__lowerCamelCase )
__snake_case : Tuple = 1
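    # Catalan recurrence: C(k) = C(k-1) * (4k - 2) // (k + 1),
    # producing 1, 1, 2, 5, 14, ... for number = 1, 2, 3, 4, 5.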
for i in range(1 , __lowerCamelCase ):
current_number *= 4 * i - 2
current_number //= i + 1
return current_number
if __name__ == "__main__":
import doctest
doctest.testmod()
| 123 |
# XXX: we want transformers master here - in the absence of conftest manipulating sys.path:
# hack it in for now:
import sys
from pathlib import Path
_snake_case : Dict = Path(__file__).resolve().parents[3] / "src"
sys.path.insert(1, str(git_repo_path))
import dataclasses # noqa
import io # noqa
import itertools # noqa
import json # noqa
import os # noqa
import unittest # noqa
from copy import deepcopy # noqa
from parameterized import parameterized # noqa
from transformers import TrainingArguments, is_torch_available # noqa
from transformers.deepspeed import is_deepspeed_available # noqa
from transformers.file_utils import WEIGHTS_NAME # noqa
from transformers.testing_utils import ( # noqa
CaptureLogger,
ExtendSysPath,
TestCasePlus,
execute_subprocess_async,
get_gpu_count,
mockenv_context,
require_deepspeed,
require_torch_gpu,
require_torch_multi_gpu,
slow,
)
from transformers.trainer_utils import set_seed # noqa
set_seed(42)
_snake_case : List[str] = {"base": "patrickvonplaten/wav2vec2_tiny_random", "robust": "patrickvonplaten/wav2vec2_tiny_random_robust"}
_snake_case : List[str] = "zero2"
_snake_case : Any = "zero3"
_snake_case : Dict = [ZEROa, ZEROa]
def lowerCAmelCase_ ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ):
# customize the test name generator function as we want both params to appear in the sub-test
# name, as by default it shows only the first param
__snake_case : Optional[Any] = parameterized.to_safe_name("_".join(str(__lowerCamelCase ) for x in param.args ) )
return F'{func.__name__}_{param_based_name}'
# Cartesian-product of zero stages with models to test
_snake_case : Union[str, Any] = list(itertools.product(stages, models.keys()))
@slow
@require_deepspeed
@require_torch_gpu
class a (_lowerCAmelCase ):
"""simple docstring"""
@parameterized.expand(lowerCamelCase , name_func=lowerCamelCase )
def __snake_case ( self : Any , lowerCamelCase : List[Any] , lowerCamelCase : Dict ) -> Union[str, Any]:
self.run_and_check(
stage=lowerCamelCase , model=lowerCamelCase , distributed=lowerCamelCase , fpaa=lowerCamelCase , )
@require_torch_multi_gpu
@parameterized.expand(lowerCamelCase , name_func=lowerCamelCase )
def __snake_case ( self : Optional[Any] , lowerCamelCase : Optional[Any] , lowerCamelCase : List[Any] ) -> int:
self.run_and_check(
stage=lowerCamelCase , model=lowerCamelCase , distributed=lowerCamelCase , fpaa=lowerCamelCase , )
@parameterized.expand(lowerCamelCase , name_func=lowerCamelCase )
def __snake_case ( self : List[Any] , lowerCamelCase : Any , lowerCamelCase : int ) -> Dict:
self.run_and_check(
stage=lowerCamelCase , model=lowerCamelCase , distributed=lowerCamelCase , fpaa=lowerCamelCase , )
@require_torch_multi_gpu
@parameterized.expand(lowerCamelCase , name_func=lowerCamelCase )
def __snake_case ( self : str , lowerCamelCase : str , lowerCamelCase : Any ) -> str:
self.run_and_check(
stage=lowerCamelCase , model=lowerCamelCase , distributed=lowerCamelCase , fpaa=lowerCamelCase , )
def __snake_case ( self : str , lowerCamelCase : List[Any] ) -> Union[str, Any]:
# XXX: run_asr is premature and doesn't save any results
# so all we check for now is that the process didn't fail
pass
def __snake_case ( self : List[Any] , lowerCamelCase : str , lowerCamelCase : str , lowerCamelCase : int = 10 , lowerCamelCase : bool = True , lowerCamelCase : bool = True , lowerCamelCase : bool = True , ) -> Tuple:
__snake_case : Any = models[model]
__snake_case : Tuple = self.run_trainer(
stage=lowerCamelCase , model_name=lowerCamelCase , eval_steps=lowerCamelCase , num_train_epochs=1 , distributed=lowerCamelCase , fpaa=lowerCamelCase , )
self.do_checks(lowerCamelCase )
return output_dir
def __snake_case ( self : Optional[Any] , lowerCamelCase : str , lowerCamelCase : str , lowerCamelCase : int = 10 , lowerCamelCase : int = 1 , lowerCamelCase : bool = True , lowerCamelCase : bool = True , ) -> Tuple:
__snake_case : Optional[int] = self.get_auto_remove_tmp_dir("./xxx" , after=lowerCamelCase )
__snake_case : Optional[int] = F'\n --model_name_or_path {model_name}\n --dataset_name hf-internal-testing/librispeech_asr_dummy\n --dataset_config_name clean\n --train_split_name validation\n --validation_split_name validation\n --output_dir {output_dir}\n --num_train_epochs {str(lowerCamelCase )}\n --per_device_train_batch_size 2\n --per_device_eval_batch_size 2\n --evaluation_strategy steps\n --learning_rate 5e-4\n --warmup_steps 8\n --orthography timit\n --preprocessing_num_workers 1\n --group_by_length\n --freeze_feature_extractor\n --report_to none\n --save_steps 0\n --eval_steps {eval_steps}\n --report_to none\n '.split()
if fpaa:
args.extend(["--fp16"] )
# currently ds_config_wav2vec2_zero.json requires "zero_optimization.find_unused_parameters": true,
# hence the separate config files
__snake_case : Optional[int] = F'--deepspeed {self.test_file_dir_str}/ds_config_wav2vec2_{stage}.json'.split()
__snake_case : Dict = [F'{self.examples_dir_str}/research_projects/wav2vec2/run_asr.py']
__snake_case : Any = self.get_launcher(lowerCamelCase )
__snake_case : Optional[Any] = launcher + script + args + ds_args
# keep for quick debug
# print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] +cmd)); die
execute_subprocess_async(lowerCamelCase , env=self.get_env() )
return output_dir
def __snake_case ( self : str , lowerCamelCase : str=False ) -> Any:
# 1. explicitly set --num_nodes=1 just in case these tests end up run on a multi-node setup
# - it won't be able to handle that
# 2. for now testing with just 2 gpus max (since some quality tests may give different
# results with mode gpus because we use very little data)
__snake_case : Dict = min(2 , get_gpu_count() ) if distributed else 1
return F'deepspeed --num_nodes 1 --num_gpus {num_gpus}'.split()
| 123 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_snake_case = {
"configuration_rembert": ["REMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "RemBertConfig", "RemBertOnnxConfig"]
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = ["RemBertTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = ["RemBertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = [
"REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"RemBertForCausalLM",
"RemBertForMaskedLM",
"RemBertForMultipleChoice",
"RemBertForQuestionAnswering",
"RemBertForSequenceClassification",
"RemBertForTokenClassification",
"RemBertLayer",
"RemBertModel",
"RemBertPreTrainedModel",
"load_tf_weights_in_rembert",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = [
"TF_REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFRemBertForCausalLM",
"TFRemBertForMaskedLM",
"TFRemBertForMultipleChoice",
"TFRemBertForQuestionAnswering",
"TFRemBertForSequenceClassification",
"TFRemBertForTokenClassification",
"TFRemBertLayer",
"TFRemBertModel",
"TFRemBertPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_rembert import REMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RemBertConfig, RemBertOnnxConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_rembert import RemBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_rembert_fast import RemBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_rembert import (
REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
RemBertForCausalLM,
RemBertForMaskedLM,
RemBertForMultipleChoice,
RemBertForQuestionAnswering,
RemBertForSequenceClassification,
RemBertForTokenClassification,
RemBertLayer,
RemBertModel,
RemBertPreTrainedModel,
load_tf_weights_in_rembert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_rembert import (
TF_REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRemBertForCausalLM,
TFRemBertForMaskedLM,
TFRemBertForMultipleChoice,
TFRemBertForQuestionAnswering,
TFRemBertForSequenceClassification,
TFRemBertForTokenClassification,
TFRemBertLayer,
TFRemBertModel,
TFRemBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
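# How the pattern above works (sketch, not part of this module): at import time only
# `_import_structure` is built; `_LazyModule` then serves attributes on demand, much
# like a PEP 562 module-level `__getattr__`:
#
#     import importlib
#     def __getattr__(name):  # called only when `name` is not found normally
#         module = importlib.import_module("." + _name_to_submodule[name], __name__)
#         return getattr(module, name)
#
# so the torch- or tf-gated imports only run the first time a model class is accessed.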
| 300 |
import secrets
from random import shuffle
from string import ascii_letters, ascii_lowercase, ascii_uppercase, digits, punctuation
def password_generator(length: int = 8) -> str:
    '''simple docstring'''
    chars = ascii_letters + digits + punctuation
    return "".join(secrets.choice(chars) for _ in range(length))
def alternative_password_generator(chars_incl: str, i: int) -> str:
    '''simple docstring'''
    # Remove the required characters from the target length, then split the
    # remainder between letters, digits and punctuation.
    i -= len(chars_incl)
    quotient = i // 3
    remainder = i % 3
    # chars = chars_incl + random_letters(ascii_letters, i / 3 + remainder) +
    # random_number(digits, i / 3) + random_characters(punctuation, i / 3)
    chars = (
        chars_incl
        + random(ascii_letters, quotient + remainder)
        + random(digits, quotient)
        + random(punctuation, quotient)
    )
    list_of_chars = list(chars)
    shuffle(list_of_chars)
    return "".join(list_of_chars)
# random is a generalised function for letters, characters and numbers
def random(chars_incl: str, i: int) -> str:
    '''simple docstring'''
    return "".join(secrets.choice(chars_incl) for _ in range(i))
def random_number(chars_incl: str, i: int):
    '''simple docstring'''
    pass  # Put your code here...
def random_letters(chars_incl: str, i: int):
    '''simple docstring'''
    pass  # Put your code here...
def random_characters(chars_incl: str, i: int):
    '''simple docstring'''
    pass  # Put your code here...
def is_strong_password(password: str, min_length: int = 8) -> bool:
    '''simple docstring'''
    if len(password) < min_length:
        # Your Password must be at least 8 characters long
        return False
    upper = any(char in ascii_uppercase for char in password)
    lower = any(char in ascii_lowercase for char in password)
    num = any(char in digits for char in password)
    spec_char = any(char in punctuation for char in password)
    return upper and lower and num and spec_char
# Passwords should contain UPPERCASE, lowercase
# numbers, and special characters
def main() -> None:
    '''simple docstring'''
    max_length = int(input("Please indicate the max length of your password: ").strip())
    chars_incl = input(
        "Please indicate the characters that must be in your password: ").strip()
    print("Password generated:", password_generator(max_length))
    print(
        "Alternative Password generated:", alternative_password_generator(chars_incl, max_length))
    print("[If you are thinking of using this password, you had better save it.]")
if __name__ == "__main__":
main()
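# Quick illustration (outputs are random, so only invariants are asserted; this
# helper is not part of the original script):
def _password_demo():
    pw = password_generator(12)
    assert len(pw) == 12
    alt = alternative_password_generator("AB", 12)
    assert "A" in alt and "B" in alt and len(alt) == 12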
| 300 | 1 |
def sum_of_series(first_term: int, common_diff: int, num_of_terms: int) -> float:
    """simple docstring"""
    total = (num_of_terms / 2) * (2 * first_term + (num_of_terms - 1) * common_diff)
    # formula for sum of an arithmetic series
    return total
def main() -> None:
    """simple docstring"""
    print(sum_of_series(1, 1, 10))
if __name__ == "__main__":
import doctest
doctest.testmod()
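# Sanity check of the closed form S_n = n/2 * (2*a + (n - 1)*d) (illustrative,
# not part of the original file):
def _sum_of_series_demo():
    assert sum_of_series(1, 1, 10) == 55.0  # 1 + 2 + ... + 10
    assert sum_of_series(5, 0, 4) == 20.0   # 5 + 5 + 5 + 5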
| 38 |
"""simple docstring"""
from collections import defaultdict
from typing import Optional
from ..image_utils import load_image
from ..utils import (
add_end_docstrings,
is_torch_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, ChunkPipeline
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_MASK_GENERATION_MAPPING
logger = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS)
class MaskGenerationPipeline(ChunkPipeline):
    """simple docstring"""
    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        requires_backends(self, "vision")
        requires_backends(self, "torch")
        if self.framework != "pt":
            raise ValueError(f"The {self.__class__} is only available in PyTorch.")
        self.check_model_type(MODEL_FOR_MASK_GENERATION_MAPPING)
    def _sanitize_parameters(self, **kwargs):
        preprocess_kwargs = {}
        postprocess_kwargs = {}
        forward_params = {}
        # preprocess args
        if "points_per_batch" in kwargs:
            preprocess_kwargs["points_per_batch"] = kwargs["points_per_batch"]
        if "points_per_crop" in kwargs:
            preprocess_kwargs["points_per_crop"] = kwargs["points_per_crop"]
        if "crops_n_layers" in kwargs:
            preprocess_kwargs["crops_n_layers"] = kwargs["crops_n_layers"]
        if "crop_overlap_ratio" in kwargs:
            preprocess_kwargs["crop_overlap_ratio"] = kwargs["crop_overlap_ratio"]
        if "crop_n_points_downscale_factor" in kwargs:
            preprocess_kwargs["crop_n_points_downscale_factor"] = kwargs["crop_n_points_downscale_factor"]
        # postprocess args
        if "pred_iou_thresh" in kwargs:
            forward_params["pred_iou_thresh"] = kwargs["pred_iou_thresh"]
        if "stability_score_offset" in kwargs:
            forward_params["stability_score_offset"] = kwargs["stability_score_offset"]
        if "mask_threshold" in kwargs:
            forward_params["mask_threshold"] = kwargs["mask_threshold"]
        if "stability_score_thresh" in kwargs:
            forward_params["stability_score_thresh"] = kwargs["stability_score_thresh"]
        if "crops_nms_thresh" in kwargs:
            postprocess_kwargs["crops_nms_thresh"] = kwargs["crops_nms_thresh"]
        if "output_rle_mask" in kwargs:
            postprocess_kwargs["output_rle_mask"] = kwargs["output_rle_mask"]
        if "output_bboxes_mask" in kwargs:
            postprocess_kwargs["output_bboxes_mask"] = kwargs["output_bboxes_mask"]
        return preprocess_kwargs, forward_params, postprocess_kwargs
    def __call__(self, image, *args, num_workers=None, batch_size=None, **kwargs):
        return super().__call__(image, *args, num_workers=num_workers, batch_size=batch_size, **kwargs)
    def preprocess(self, image, points_per_batch=64, crops_n_layers=0, crop_overlap_ratio=512 / 1500, points_per_crop=32, crop_n_points_downscale_factor=1, ):
        image = load_image(image)
        target_size = self.image_processor.size["longest_edge"]
        crop_boxes, grid_points, cropped_images, input_labels = self.image_processor.generate_crop_boxes(
            image, target_size, crops_n_layers, crop_overlap_ratio, points_per_crop, crop_n_points_downscale_factor)
        model_inputs = self.image_processor(images=cropped_images, return_tensors="pt")
        with self.device_placement():
            if self.framework == "pt":
                inference_context = self.get_inference_context()
                with inference_context():
                    model_inputs = self._ensure_tensor_on_device(model_inputs, device=self.device)
                    image_embeddings = self.model.get_image_embeddings(model_inputs.pop("pixel_values"))
                    model_inputs["image_embeddings"] = image_embeddings
        n_points = grid_points.shape[1]
        points_per_batch = points_per_batch if points_per_batch is not None else n_points
        if points_per_batch <= 0:
            raise ValueError(
                "Cannot have points_per_batch<=0. Must be >=1 to returned batched outputs. "
                "To return all points at once, set points_per_batch to None")
        for i in range(0, n_points, points_per_batch):
            batched_points = grid_points[:, i : i + points_per_batch, :, :]
            labels = input_labels[:, i : i + points_per_batch]
            is_last = i == n_points - points_per_batch
            yield {
                "input_points": batched_points,
                "input_labels": labels,
                "input_boxes": crop_boxes,
                "is_last": is_last,
                **model_inputs,
            }
    def _forward(self, model_inputs, pred_iou_thresh=0.88, stability_score_thresh=0.95, mask_threshold=0, stability_score_offset=1, ):
        input_boxes = model_inputs.pop("input_boxes")
        is_last = model_inputs.pop("is_last")
        original_sizes = model_inputs.pop("original_sizes").tolist()
        reshaped_input_sizes = model_inputs.pop("reshaped_input_sizes").tolist()
        model_outputs = self.model(**model_inputs)
        # post processing happens here in order to avoid CPU GPU copies of ALL the masks
        low_resolution_masks = model_outputs["pred_masks"]
        masks = self.image_processor.post_process_masks(
            low_resolution_masks, original_sizes, reshaped_input_sizes, mask_threshold, binarize=False)
        iou_scores = model_outputs["iou_scores"]
        masks, iou_scores, boxes = self.image_processor.filter_masks(
            masks[0], iou_scores[0], original_sizes[0], input_boxes[0], pred_iou_thresh, stability_score_thresh, mask_threshold, stability_score_offset, )
        return {
            "masks": masks,
            "is_last": is_last,
            "boxes": boxes,
            "iou_scores": iou_scores,
        }
    def postprocess(self, model_outputs, output_rle_mask=False, output_bboxes_mask=False, crops_nms_thresh=0.7, ):
        all_scores = []
        all_masks = []
        all_boxes = []
        for model_output in model_outputs:
            all_scores.append(model_output.pop("iou_scores"))
            all_masks.extend(model_output.pop("masks"))
            all_boxes.append(model_output.pop("boxes"))
        all_scores = torch.cat(all_scores)
        all_boxes = torch.cat(all_boxes)
        output_masks, iou_scores, rle_mask, bounding_boxes = self.image_processor.post_process_for_mask_generation(
            all_masks, all_scores, all_boxes, crops_nms_thresh)
        extra = defaultdict(list)
        for output in model_outputs:
            for k, v in output.items():
                extra[k].append(v)
        optional = {}
        if output_rle_mask:
            optional["rle_mask"] = rle_mask
        if output_bboxes_mask:
            optional["bounding_boxes"] = bounding_boxes
        return {"masks": output_masks, "scores": iou_scores, **optional, **extra}
| 177 | 0 |
from __future__ import annotations
from decimal import Decimal
from math import * # noqa: F403
from sympy import diff
def newton_raphson(func: str, a: float | Decimal, precision: float = 10**-10) -> float:
    """simple docstring"""
    x = a
    while True:
        # Newton's update: x_{n+1} = x_n - f(x_n) / f'(x_n)
        x = Decimal(x) - (
            Decimal(eval(func)) / Decimal(eval(str(diff(func))))  # noqa: S307
        )
        # This number dictates the accuracy of the answer
        if abs(eval(func)) < precision:  # noqa: S307
            return float(x)
# Let's Execute
if __name__ == "__main__":
# Find root of trigonometric function
# Find value of pi
print(F'''The root of sin(x) = 0 is {newton_raphson('sin(x)', 2)}''')
# Find root of polynomial
print(F'''The root of x**2 - 5*x + 2 = 0 is {newton_raphson('x**2 - 5*x + 2', 0.4)}''')
# Find Square Root of 5
print(F'''The root of log(x) - 1 = 0 is {newton_raphson('log(x) - 1', 2)}''')
# Exponential Roots
print(F'''The root of exp(x) - 1 = 0 is {newton_raphson('exp(x) - 1', 0)}''')
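# The eval()-based version above is compact but fragile. A minimal sketch of the
# same update rule x <- x - f(x)/f'(x) over plain callables (names illustrative,
# not part of the original file):
def _newton_raphson_callable(f, df, x0, precision=1e-10):
    x = x0
    while abs(f(x)) >= precision:
        x = x - f(x) / df(x)
    return x
# _newton_raphson_callable(lambda x: x * x - 5, lambda x: 2 * x, 2.0)  # ~= 5 ** 0.5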
| 371 |
def SCREAMING_SNAKE_CASE ( lowercase_ ) -> str:
"""simple docstring"""
return " ".join(
''''''.join(word[::-1] ) if len(lowercase_ ) > 4 else word for word in sentence.split() )
if __name__ == "__main__":
import doctest
doctest.testmod()
print(reverse_long_words("""Hey wollef sroirraw"""))
| 231 | 0 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)
VIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/vit-base-patch16-224": "https://huggingface.co/vit-base-patch16-224/resolve/main/config.json",
    # See all ViT models at https://huggingface.co/models?filter=vit
}
class ViTConfig(PretrainedConfig):
    '''simple docstring'''
    model_type = "vit"
    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1E-12,
        image_size=224,
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        encoder_stride=16,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.encoder_stride = encoder_stride
class ViTOnnxConfig(OnnxConfig):
    '''simple docstring'''
    torch_onnx_minimum_version = version.parse("1.11")
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ])
    @property
    def atol_for_validation(self) -> float:
        return 1E-4
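# Usage sketch (illustrative): configs are plain containers, so a model built from
# a fresh config gets randomly initialised weights.
#
#     from transformers import ViTModel
#     config = ViTConfig(image_size=224, patch_size=16)
#     model = ViTModel(config)
#     # 224 // 16 = 14 patches per side -> 196 patch tokens (plus the CLS token)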
| 238 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
    "configuration_tapas": ["TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP", "TapasConfig"],
    "tokenization_tapas": ["TapasTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tapas"] = [
"TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST",
"TapasForMaskedLM",
"TapasForQuestionAnswering",
"TapasForSequenceClassification",
"TapasModel",
"TapasPreTrainedModel",
"load_tf_weights_in_tapas",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_tapas"] = [
"TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFTapasForMaskedLM",
"TFTapasForQuestionAnswering",
"TFTapasForSequenceClassification",
"TFTapasModel",
"TFTapasPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_tapas import TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP, TapasConfig
from .tokenization_tapas import TapasTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tapas import (
TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST,
TapasForMaskedLM,
TapasForQuestionAnswering,
TapasForSequenceClassification,
TapasModel,
TapasPreTrainedModel,
load_tf_weights_in_tapas,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_tapas import (
TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST,
TFTapasForMaskedLM,
TFTapasForQuestionAnswering,
TFTapasForSequenceClassification,
TFTapasModel,
TFTapasPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 238 | 1 |
'''simple docstring'''
from __future__ import annotations
from math import gcd
def pollard_rho(num: int, seed: int = 2, step: int = 1, attempts: int = 3, ) -> int | None:
# A value less than 2 can cause an infinite loop in the algorithm.
if num < 2:
raise ValueError('The input value cannot be less than 2' )
# Because of the relationship between ``f(f(x))`` and ``f(x)``, this
# algorithm struggles to find factors that are divisible by two.
# As a workaround, we specifically check for two and even inputs.
# See: https://math.stackexchange.com/a/2856214/165820
if num > 2 and num % 2 == 0:
return 2
# Pollard's Rho algorithm requires a function that returns pseudorandom
# values between 0 <= X < ``num``. It doesn't need to be random in the
# sense that the output value is cryptographically secure or difficult
# to calculate, it only needs to be random in the sense that all output
# values should be equally likely to appear.
# For this reason, Pollard suggested using ``f(x) = (x**2 - 1) % num``
# However, the success of Pollard's algorithm isn't guaranteed and is
# determined in part by the initial seed and the chosen random function.
# To make retries easier, we will instead use ``f(x) = (x**2 + C) % num``
# where ``C`` is a value that we can modify between each attempt.
    def rand_fn(value: int, step: int, modulus: int) -> int:
        return (pow(value, 2) + step) % modulus
    for _ in range(attempts):
# These track the position within the cycle detection logic.
        tortoise = seed
        hare = seed
while True:
# At each iteration, the tortoise moves one step and the hare moves two.
            tortoise = rand_fn(tortoise, step, num)
            hare = rand_fn(hare, step, num)
            hare = rand_fn(hare, step, num)
# At some point both the tortoise and the hare will enter a cycle whose
# length ``p`` is a divisor of ``num``. Once in that cycle, at some point
# the tortoise and hare will end up on the same value modulo ``p``.
# We can detect when this happens because the position difference between
# the tortoise and the hare will share a common divisor with ``num``.
            divisor = gcd(hare - tortoise, num)
if divisor == 1:
# No common divisor yet, just keep searching.
continue
else:
# We found a common divisor!
if divisor == num:
# Unfortunately, the divisor is ``num`` itself and is useless.
break
else:
# The divisor is a nontrivial factor of ``num``!
return divisor
# If we made it here, then this attempt failed.
# We need to pick a new starting seed for the tortoise and hare
# in addition to a new step value for the random function.
# To keep this example implementation deterministic, the
# new values will be generated based on currently available
# values instead of using something like ``random.randint``.
# We can use the hare's position as the new seed.
# This is actually what Richard Brent's the "optimized" variant does.
        seed = hare
# The new step value for the random function can just be incremented.
# At first the results will be similar to what the old function would
# have produced, but the value will quickly diverge after a bit.
step += 1
# We haven't found a divisor within the requested number of attempts.
# We were unlucky or ``num`` itself is actually prime.
return None
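# Illustration on small composites (helper not in the original file; which factor
# comes back depends on the seed and step):
def _pollard_rho_demo():
    assert pollard_rho(2) == 2            # even numbers short-circuit
    assert pollard_rho(187) in (11, 17)   # 187 = 11 * 17
    assert pollard_rho(11) is None        # primes exhaust all attempts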
if __name__ == "__main__":
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument(
        'num',
        type=int,
        help='The value to find a divisor of',
    )
    parser.add_argument(
        '--attempts',
        type=int,
        default=3,
        help='The number of attempts before giving up',
    )
    args = parser.parse_args()
    divisor = pollard_rho(args.num, attempts=args.attempts)
    if divisor is None:
        print(f"{args.num} is probably prime")
    else:
        quotient = args.num // divisor
        print(f"{args.num} = {divisor} * {quotient}")
| 190 |
'''simple docstring'''
import inspect
import tempfile
from collections import OrderedDict, UserDict
from collections.abc import MutableMapping
from contextlib import ExitStack, contextmanager
from dataclasses import fields
from enum import Enum
from typing import Any, ContextManager, List, Tuple
import numpy as np
from .import_utils import is_flax_available, is_tf_available, is_torch_available, is_torch_fx_proxy
if is_flax_available():
import jax.numpy as jnp
class cached_property(property):
    def __get__(self, obj, objtype=None):
        '''simple docstring'''
        if obj is None:
            return self
        if self.fget is None:
            raise AttributeError('unreadable attribute')
        attr = '__cached_' + self.fget.__name__
        cached = getattr(obj, attr, None)
        if cached is None:
            cached = self.fget(obj)
            setattr(obj, attr, cached)
        return cached
def strtobool(val) -> int:
    val = val.lower()
    if val in {"y", "yes", "t", "true", "on", "1"}:
        return 1
    if val in {"n", "no", "f", "false", "off", "0"}:
        return 0
    raise ValueError(f'invalid truth value {val!r}')
def is_tensor(x):
    if is_torch_fx_proxy(x):
        return True
    if is_torch_available():
        import torch
        if isinstance(x, torch.Tensor):
            return True
    if is_tf_available():
        import tensorflow as tf
        if isinstance(x, tf.Tensor):
            return True
    if is_flax_available():
        import jax.numpy as jnp
        from jax.core import Tracer
        if isinstance(x, (jnp.ndarray, Tracer)):
            return True
    return isinstance(x, np.ndarray)
def _is_numpy(x):
    return isinstance(x, np.ndarray)
def is_numpy_array(x):
    return _is_numpy(x)
def _is_torch(x):
    import torch
    return isinstance(x, torch.Tensor)
def is_torch_tensor(x):
    return False if not is_torch_available() else _is_torch(x)
def _is_torch_device(x):
    import torch
    return isinstance(x, torch.device)
def is_torch_device(x):
    return False if not is_torch_available() else _is_torch_device(x)
def _is_torch_dtype(x):
    import torch
    if isinstance(x, str):
        if hasattr(torch, x):
            x = getattr(torch, x)
        else:
            return False
    return isinstance(x, torch.dtype)
def is_torch_dtype(x):
    return False if not is_torch_available() else _is_torch_dtype(x)
def _is_tensorflow(x):
    import tensorflow as tf
    return isinstance(x, tf.Tensor)
def is_tf_tensor(x):
    return False if not is_tf_available() else _is_tensorflow(x)
def _is_tf_symbolic_tensor(x):
    import tensorflow as tf
    # the `is_symbolic_tensor` predicate is only available starting with TF 2.14
    if hasattr(tf, 'is_symbolic_tensor'):
        return tf.is_symbolic_tensor(x)
    return type(x) == tf.Tensor
def is_tf_symbolic_tensor(x):
    return False if not is_tf_available() else _is_tf_symbolic_tensor(x)
def _is_jax(x):
    import jax.numpy as jnp  # noqa: F811
    return isinstance(x, jnp.ndarray)
def is_jax_tensor(x):
    return False if not is_flax_available() else _is_jax(x)
def to_py_obj(obj):
    if isinstance(obj, (dict, UserDict)):
        return {k: to_py_obj(v) for k, v in obj.items()}
    elif isinstance(obj, (list, tuple)):
        return [to_py_obj(o) for o in obj]
    elif is_tf_tensor(obj):
        return obj.numpy().tolist()
    elif is_torch_tensor(obj):
        return obj.detach().cpu().tolist()
    elif is_jax_tensor(obj):
        return np.asarray(obj).tolist()
    elif isinstance(obj, (np.ndarray, np.number)):  # tolist also works on 0d np arrays
        return obj.tolist()
    else:
        return obj
def to_numpy(obj):
    if isinstance(obj, (dict, UserDict)):
        return {k: to_numpy(v) for k, v in obj.items()}
    elif isinstance(obj, (list, tuple)):
        return np.array(obj)
    elif is_tf_tensor(obj):
        return obj.numpy()
    elif is_torch_tensor(obj):
        return obj.detach().cpu().numpy()
    elif is_jax_tensor(obj):
        return np.asarray(obj)
    else:
        return obj
class ModelOutput(OrderedDict):
    def __post_init__(self):
        '''simple docstring'''
        class_fields = fields(self)
        # Safety and consistency checks
        if not len(class_fields):
            raise ValueError(f'{self.__class__.__name__} has no fields.')
        if not all(field.default is None for field in class_fields[1:]):
            raise ValueError(f'{self.__class__.__name__} should not have more than one required field.')
        first_field = getattr(self, class_fields[0].name)
        other_fields_are_none = all(getattr(self, field.name) is None for field in class_fields[1:])
        if other_fields_are_none and not is_tensor(first_field):
            if isinstance(first_field, dict):
                iterator = first_field.items()
                first_field_iterator = True
            else:
                try:
                    iterator = iter(first_field)
                    first_field_iterator = True
                except TypeError:
                    first_field_iterator = False
        # if we provided an iterator as first field and the iterator is a (key, value) iterator
        # set the associated fields
            if first_field_iterator:
                for idx, element in enumerate(iterator):
                    if (
                        not isinstance(element, (list, tuple))
                        or not len(element) == 2
                        or not isinstance(element[0], str)
                    ):
                        if idx == 0:
                            # If we do not have an iterator of key/values, set it as attribute
                            self[class_fields[0].name] = first_field
                        else:
                            # If we have a mixed iterator, raise an error
                            raise ValueError(
                                f'Cannot set key/value for {element}. It needs to be a tuple (key, value).')
                        break
                    setattr(self, element[0], element[1])
                    if element[1] is not None:
                        self[element[0]] = element[1]
        elif first_field is not None:
            self[class_fields[0].name] = first_field
        else:
            for field in class_fields:
                v = getattr(self, field.name)
                if v is not None:
                    self[field.name] = v
    def __delitem__(self, *args, **kwargs):
        '''simple docstring'''
        raise Exception(f'You cannot use ``__delitem__`` on a {self.__class__.__name__} instance.')
    def setdefault(self, *args, **kwargs):
        '''simple docstring'''
        raise Exception(f'You cannot use ``setdefault`` on a {self.__class__.__name__} instance.')
    def pop(self, *args, **kwargs):
        '''simple docstring'''
        raise Exception(f'You cannot use ``pop`` on a {self.__class__.__name__} instance.')
    def update(self, *args, **kwargs):
        '''simple docstring'''
        raise Exception(f'You cannot use ``update`` on a {self.__class__.__name__} instance.')
    def __getitem__(self, k):
        '''simple docstring'''
        if isinstance(k, str):
            inner_dict = dict(self.items())
            return inner_dict[k]
        else:
            return self.to_tuple()[k]
    def __setattr__(self, name, value):
        '''simple docstring'''
        if name in self.keys() and value is not None:
            # Don't call self.__setitem__ to avoid recursion errors
            super().__setitem__(name, value)
        super().__setattr__(name, value)
    def __setitem__(self, key, value):
        '''simple docstring'''
        super().__setitem__(key, value)
        # Don't call self.__setattr__ to avoid recursion errors
        super().__setattr__(key, value)
    def to_tuple(self):
        '''simple docstring'''
        return tuple(self[k] for k in self.keys())
class ExplicitEnum(str, Enum):
    @classmethod
    def _missing_(cls, value):
        '''simple docstring'''
        raise ValueError(
            f'{value} is not a valid {cls.__name__}, please select one of {list(cls._value2member_map_.keys())}')
class PaddingStrategy(ExplicitEnum):
    LONGEST = 'longest'
    MAX_LENGTH = 'max_length'
    DO_NOT_PAD = 'do_not_pad'
class TensorType(ExplicitEnum):
    PYTORCH = 'pt'
    TENSORFLOW = 'tf'
    NUMPY = 'np'
    JAX = 'jax'
class ContextManagers:
    def __init__(self, context_managers: List[ContextManager]):
        '''simple docstring'''
        self.context_managers = context_managers
        self.stack = ExitStack()
    def __enter__(self):
        '''simple docstring'''
        for context_manager in self.context_managers:
            self.stack.enter_context(context_manager)
    def __exit__(self, *args, **kwargs):
        '''simple docstring'''
        self.stack.__exit__(*args, **kwargs)
def can_return_loss(model_class):
    framework = infer_framework(model_class)
    if framework == "tf":
        signature = inspect.signature(model_class.call)  # TensorFlow models
    elif framework == "pt":
        signature = inspect.signature(model_class.forward)  # PyTorch models
    else:
        signature = inspect.signature(model_class.__call__)  # Flax models
    for p in signature.parameters:
        if p == "return_loss" and signature.parameters[p].default is True:
            return True
    return False
def find_labels(model_class):
    model_name = model_class.__name__
    framework = infer_framework(model_class)
    if framework == "tf":
        signature = inspect.signature(model_class.call)  # TensorFlow models
    elif framework == "pt":
        signature = inspect.signature(model_class.forward)  # PyTorch models
    else:
        signature = inspect.signature(model_class.__call__)  # Flax models
    if "QuestionAnswering" in model_name:
        return [p for p in signature.parameters if "label" in p or p in ("start_positions", "end_positions")]
    else:
        return [p for p in signature.parameters if "label" in p]
def flatten_dict(d: MutableMapping, parent_key: str = "", delimiter: str = "."):
    def _flatten_dict(d, parent_key="", delimiter="."):
        for k, v in d.items():
            key = str(parent_key) + delimiter + str(k) if parent_key else k
            if v and isinstance(v, MutableMapping):
                yield from flatten_dict(v, key, delimiter=delimiter).items()
            else:
                yield key, v
    return dict(_flatten_dict(d, parent_key, delimiter))
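# Illustration of the flattening helper (demo function, not part of the original
# module; the sample dict is arbitrary):
def _flatten_dict_demo():
    nested = {"a": 1, "b": {"c": 2, "d": {"e": 3}}}
    assert flatten_dict(nested) == {"a": 1, "b.c": 2, "b.d.e": 3}
    assert flatten_dict(nested, delimiter="/") == {"a": 1, "b/c": 2, "b/d/e": 3}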
@contextmanager
def working_or_temp_dir(working_dir, use_temp_dir: bool = False):
    if use_temp_dir:
        with tempfile.TemporaryDirectory() as tmp_dir:
            yield tmp_dir
    else:
        yield working_dir
def transpose(array, axes=None):
    if is_numpy_array(array):
        return np.transpose(array, axes=axes)
    elif is_torch_tensor(array):
        return array.T if axes is None else array.permute(*axes)
    elif is_tf_tensor(array):
        import tensorflow as tf
        return tf.transpose(array, perm=axes)
    elif is_jax_tensor(array):
        return jnp.transpose(array, axes=axes)
    else:
        raise ValueError(f'Type not supported for transpose: {type(array)}.')
def reshape(array, newshape):
    if is_numpy_array(array):
        return np.reshape(array, newshape)
    elif is_torch_tensor(array):
        return array.reshape(*newshape)
    elif is_tf_tensor(array):
        import tensorflow as tf
        return tf.reshape(array, newshape)
    elif is_jax_tensor(array):
        return jnp.reshape(array, newshape)
    else:
        raise ValueError(f'Type not supported for reshape: {type(array)}.')
def squeeze(array, axis=None):
    if is_numpy_array(array):
        return np.squeeze(array, axis=axis)
    elif is_torch_tensor(array):
        return array.squeeze() if axis is None else array.squeeze(dim=axis)
    elif is_tf_tensor(array):
        import tensorflow as tf
        return tf.squeeze(array, axis=axis)
    elif is_jax_tensor(array):
        return jnp.squeeze(array, axis=axis)
    else:
        raise ValueError(f'Type not supported for squeeze: {type(array)}.')
def expand_dims(array, axis):
    if is_numpy_array(array):
        return np.expand_dims(array, axis)
    elif is_torch_tensor(array):
        return array.unsqueeze(dim=axis)
    elif is_tf_tensor(array):
        import tensorflow as tf
        return tf.expand_dims(array, axis=axis)
    elif is_jax_tensor(array):
        return jnp.expand_dims(array, axis=axis)
    else:
        raise ValueError(f'Type not supported for expand_dims: {type(array)}.')
def tensor_size(array):
    if is_numpy_array(array):
        return np.size(array)
    elif is_torch_tensor(array):
        return array.numel()
    elif is_tf_tensor(array):
        import tensorflow as tf
        return tf.size(array)
    elif is_jax_tensor(array):
        return array.size
    else:
        raise ValueError(f'Type not supported for tensor_size: {type(array)}.')
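# Quick numpy-only check of the dispatch helpers above (demo function, not part of
# the original module; the torch/tf/jax branches behave analogously):
def _tensor_helpers_demo():
    arr = np.zeros((2, 1, 3))
    assert transpose(arr).shape == (3, 1, 2)
    assert reshape(arr, (3, 2)).shape == (3, 2)
    assert squeeze(arr).shape == (2, 3)
    assert expand_dims(arr, 0).shape == (1, 2, 1, 3)
    assert tensor_size(arr) == 6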
def add_model_info_to_auto_map(auto_map, repo_id):
    for key, value in auto_map.items():
        if isinstance(value, (tuple, list)):
            auto_map[key] = [f'{repo_id}--{v}' if (v is not None and '--' not in v) else v for v in value]
        elif value is not None and "--" not in value:
            auto_map[key] = f'{repo_id}--{value}'
    return auto_map
def infer_framework(model_class):
    for base_class in inspect.getmro(model_class):
        module = base_class.__module__
        name = base_class.__name__
        if module.startswith('tensorflow') or module.startswith('keras') or name == "TFPreTrainedModel":
            return "tf"
        elif module.startswith('torch') or name == "PreTrainedModel":
            return "pt"
        elif module.startswith('flax') or module.startswith('jax') or name == "FlaxPreTrainedModel":
            return "flax"
    else:
        raise TypeError(f'Could not infer framework from class {model_class}.')
| 190 | 1 |
'''simple docstring'''
import unittest
from transformers import AutoTokenizer, FalconConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
FalconForCausalLM,
FalconForQuestionAnswering,
FalconForSequenceClassification,
FalconForTokenClassification,
FalconModel,
)
class lowerCAmelCase__ :
def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=3 , __SCREAMING_SNAKE_CASE=7 , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=False , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=99 , __SCREAMING_SNAKE_CASE=32 , __SCREAMING_SNAKE_CASE=5 , __SCREAMING_SNAKE_CASE=4 , __SCREAMING_SNAKE_CASE=37 , __SCREAMING_SNAKE_CASE="gelu" , __SCREAMING_SNAKE_CASE=0.1 , __SCREAMING_SNAKE_CASE=0.1 , __SCREAMING_SNAKE_CASE=5_12 , __SCREAMING_SNAKE_CASE=16 , __SCREAMING_SNAKE_CASE=2 , __SCREAMING_SNAKE_CASE=0.02 , __SCREAMING_SNAKE_CASE=3 , __SCREAMING_SNAKE_CASE=4 , __SCREAMING_SNAKE_CASE=None , ):
"""simple docstring"""
lowercase_ : int = parent
lowercase_ : Tuple = batch_size
lowercase_ : Optional[int] = seq_length
lowercase_ : Any = is_training
lowercase_ : Optional[Any] = use_input_mask
lowercase_ : int = use_token_type_ids
lowercase_ : Any = use_labels
lowercase_ : Union[str, Any] = vocab_size
lowercase_ : Any = hidden_size
lowercase_ : List[str] = num_hidden_layers
lowercase_ : Union[str, Any] = num_attention_heads
lowercase_ : Tuple = intermediate_size
lowercase_ : Optional[Any] = hidden_act
lowercase_ : List[str] = hidden_dropout_prob
lowercase_ : Any = attention_probs_dropout_prob
lowercase_ : Union[str, Any] = max_position_embeddings
lowercase_ : Optional[int] = type_vocab_size
lowercase_ : List[str] = type_sequence_label_size
lowercase_ : int = initializer_range
lowercase_ : Union[str, Any] = num_labels
lowercase_ : str = num_choices
lowercase_ : Tuple = scope
def _snake_case ( self ):
"""simple docstring"""
lowercase_ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowercase_ : Dict = None
if self.use_input_mask:
lowercase_ : Tuple = random_attention_mask([self.batch_size, self.seq_length] )
lowercase_ : List[Any] = None
lowercase_ : List[str] = None
lowercase_ : str = None
lowercase_ : List[Any] = None
if self.use_labels:
lowercase_ : Any = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowercase_ : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowercase_ : Optional[Any] = ids_tensor([self.batch_size] , self.num_choices )
lowercase_ : str = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def _snake_case ( self ):
"""simple docstring"""
return FalconConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__UpperCAmelCase , initializer_range=self.initializer_range , pad_token_id=1 , new_decoder_architecture=__UpperCAmelCase , )
def _snake_case ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
lowercase_ : Optional[Any] = FalconModel(config=__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
lowercase_ : Optional[int] = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase )
lowercase_ : Union[str, Any] = model(__UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _snake_case ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , ):
"""simple docstring"""
lowercase_ : str = True
lowercase_ : str = FalconModel(__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
lowercase_ : Union[str, Any] = model(
__UpperCAmelCase , attention_mask=__UpperCAmelCase , encoder_hidden_states=__UpperCAmelCase , encoder_attention_mask=__UpperCAmelCase , )
lowercase_ : int = model(
__UpperCAmelCase , attention_mask=__UpperCAmelCase , encoder_hidden_states=__UpperCAmelCase , )
lowercase_ : Optional[int] = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _snake_case ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , ):
"""simple docstring"""
lowercase_ : Union[str, Any] = FalconForCausalLM(config=__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
lowercase_ : Any = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , labels=__UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _snake_case ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , ):
"""simple docstring"""
lowercase_ : Optional[int] = True
lowercase_ : Tuple = True
lowercase_ : Union[str, Any] = FalconForCausalLM(config=__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
# first forward pass
lowercase_ : str = model(
__UpperCAmelCase , attention_mask=__UpperCAmelCase , encoder_hidden_states=__UpperCAmelCase , encoder_attention_mask=__UpperCAmelCase , use_cache=__UpperCAmelCase , )
lowercase_ : int = outputs.past_key_values
# create hypothetical multiple next token and extent to next_input_ids
lowercase_ : Tuple = ids_tensor((self.batch_size, 3) , config.vocab_size )
lowercase_ : Optional[Any] = ids_tensor((self.batch_size, 3) , vocab_size=2 )
# append to next input_ids and
lowercase_ : List[str] = torch.cat([input_ids, next_tokens] , dim=-1 )
lowercase_ : Optional[int] = torch.cat([input_mask, next_mask] , dim=-1 )
lowercase_ : Optional[Any] = model(
__UpperCAmelCase , attention_mask=__UpperCAmelCase , encoder_hidden_states=__UpperCAmelCase , encoder_attention_mask=__UpperCAmelCase , output_hidden_states=__UpperCAmelCase , )["""hidden_states"""][0]
lowercase_ : Dict = model(
__UpperCAmelCase , attention_mask=__UpperCAmelCase , encoder_hidden_states=__UpperCAmelCase , encoder_attention_mask=__UpperCAmelCase , past_key_values=__UpperCAmelCase , output_hidden_states=__UpperCAmelCase , )["""hidden_states"""][0]
# select random slice
lowercase_ : int = ids_tensor((1,) , output_from_past.shape[-1] ).item()
lowercase_ : Union[str, Any] = output_from_no_past[:, -3:, random_slice_idx].detach()
lowercase_ : Any = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(__UpperCAmelCase , __UpperCAmelCase , atol=1E-3 ) )
def _snake_case ( self ):
"""simple docstring"""
lowercase_ : int = self.prepare_config_and_inputs()
(
lowercase_
) : Any = config_and_inputs
lowercase_ : List[Any] = {"""input_ids""": input_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_torch
class lowerCAmelCase__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ):
lowerCAmelCase_ = (
(
FalconModel,
FalconForCausalLM,
FalconForSequenceClassification,
FalconForTokenClassification,
FalconForQuestionAnswering,
)
if is_torch_available()
else ()
)
lowerCAmelCase_ = (FalconForCausalLM,) if is_torch_available() else ()
lowerCAmelCase_ = (
{
"feature-extraction": FalconModel,
"text-classification": FalconForSequenceClassification,
"text-generation": FalconForCausalLM,
"question-answering": FalconForQuestionAnswering,
"token-classification": FalconForTokenClassification,
"zero-shot": FalconForSequenceClassification,
}
if is_torch_available()
else {}
)
lowerCAmelCase_ = False
lowerCAmelCase_ = False
def _snake_case ( self ):
"""simple docstring"""
lowercase_ : Optional[Any] = FalconModelTester(self )
lowercase_ : Optional[Any] = ConfigTester(self , config_class=__UpperCAmelCase , hidden_size=37 )
def _snake_case ( self ):
"""simple docstring"""
self.config_tester.run_common_tests()
def _snake_case ( self ):
"""simple docstring"""
lowercase_ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__UpperCAmelCase )
def _snake_case ( self ):
"""simple docstring"""
lowercase_ : int = self.model_tester.prepare_config_and_inputs()
for alibi in [True, False]:
lowercase_ : Union[str, Any] = alibi
self.model_tester.create_and_check_model(__UpperCAmelCase , *__UpperCAmelCase )
def _snake_case ( self ):
"""simple docstring"""
lowercase_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
lowercase_ : Tuple = 3
lowercase_ : List[Any] = input_dict["""input_ids"""]
lowercase_ : str = input_ids.ne(1 ).to(__UpperCAmelCase )
lowercase_ : Any = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
lowercase_ : Union[str, Any] = FalconForSequenceClassification(__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
lowercase_ : Optional[Any] = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , labels=__UpperCAmelCase )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def _snake_case ( self ):
"""simple docstring"""
lowercase_ : Any = self.model_tester.prepare_config_and_inputs_for_common()
lowercase_ : Optional[int] = 3
lowercase_ : Optional[int] = """single_label_classification"""
lowercase_ : str = input_dict["""input_ids"""]
lowercase_ : List[Any] = input_ids.ne(1 ).to(__UpperCAmelCase )
lowercase_ : Dict = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
lowercase_ : str = FalconForSequenceClassification(__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
lowercase_ : int = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , labels=__UpperCAmelCase )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def _snake_case ( self ):
"""simple docstring"""
lowercase_ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
lowercase_ : Tuple = input_dict["""input_ids"""]
lowercase_ : List[Any] = FalconForCausalLM(__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
lowercase_ : List[str] = model(__UpperCAmelCase , use_cache=__UpperCAmelCase )
lowercase_ : List[Any] = input_ids.shape[0]
lowercase_ : List[str] = model._convert_to_rw_cache(result.past_key_values )
lowercase_ : List[str] = model._convert_cache_to_standard_format(__UpperCAmelCase , __UpperCAmelCase )
for layer in range(len(__UpperCAmelCase ) ):
for tensor_idx in range(2 ):
self.assertTrue(rw_cache[layer][tensor_idx].ndim == 3 )
self.assertTrue(result.past_key_values[layer][tensor_idx].ndim == 4 )
self.assertTrue(
torch.all(result.past_key_values[layer][tensor_idx] == standard_cache[layer][tensor_idx] ) )
def _snake_case ( self ):
"""simple docstring"""
lowercase_ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
lowercase_ : Tuple = 3
lowercase_ : Dict = """multi_label_classification"""
lowercase_ : int = input_dict["""input_ids"""]
lowercase_ : Optional[Any] = input_ids.ne(1 ).to(__UpperCAmelCase )
lowercase_ : List[Any] = ids_tensor(
[self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float )
lowercase_ : Any = FalconForSequenceClassification(__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
lowercase_ : List[Any] = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , labels=__UpperCAmelCase )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def _snake_case ( self ):
"""simple docstring"""
for model_class in self.all_generative_model_classes:
lowercase_ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
# If it doesn't support cache, pass the test
if not hasattr(__UpperCAmelCase , '''use_cache''' ):
return
lowercase_ : int = model_class(__UpperCAmelCase ).to(__UpperCAmelCase )
if "use_cache" not in inputs:
lowercase_ : List[Any] = True
lowercase_ : Optional[Any] = model(**__UpperCAmelCase )
# If "past_key_values" is not returned, pass the test (e.g. RWKV uses a different cache name and format)
if "past_key_values" not in outputs:
return
lowercase_ : str = (
getattr(__UpperCAmelCase , '''decoder_layers''' , __UpperCAmelCase )
or getattr(__UpperCAmelCase , '''num_decoder_layers''' , __UpperCAmelCase )
or config.num_hidden_layers
)
lowercase_ : Union[str, Any] = getattr(__UpperCAmelCase , '''num_kv_heads''' , config.num_attention_heads )
lowercase_ : List[str] = getattr(__UpperCAmelCase , '''d_model''' , config.hidden_size )
lowercase_ : Optional[int] = embed_dim // num_attention_heads
lowercase_ : Optional[Any] = outputs["""past_key_values"""]
self.assertEqual(len(__UpperCAmelCase ) , __UpperCAmelCase )
lowercase_ : str = inputs["""input_ids"""].shape
for i in range(__UpperCAmelCase ):
if config.new_decoder_architecture:
lowercase_ : Tuple = config.num_attention_heads
elif config.multi_query:
lowercase_ : List[str] = 1
self.assertEqual(len(past_kv[0] ) , 2 ) # K V for the decoder = 2
self.assertEqual(
past_kv[i][0].shape , (batch_size, num_attention_heads, seq_length, per_head_embed_dim) )
self.assertEqual(
past_kv[i][1].shape , (batch_size, num_attention_heads, seq_length, per_head_embed_dim) )
@require_torch
class lowerCAmelCase__ ( unittest.TestCase ):
@slow
def _snake_case ( self ):
"""simple docstring"""
lowercase_ : Optional[int] = AutoTokenizer.from_pretrained('''Rocketknight1/falcon-rw-1b''' )
lowercase_ : List[str] = FalconForCausalLM.from_pretrained('''Rocketknight1/falcon-rw-1b''' )
model.eval()
model.to(__UpperCAmelCase )
lowercase_ : Any = tokenizer('''My favorite food is''' , return_tensors='''pt''' ).to(__UpperCAmelCase )
lowercase_ : Any = (
"""My favorite food is pizza. I love it so much that I have a pizza party every year for my birthday."""
)
lowercase_ : Tuple = model.generate(**__UpperCAmelCase , do_sample=__UpperCAmelCase , max_new_tokens=19 )
lowercase_ : int = tokenizer.batch_decode(__UpperCAmelCase )[0]
self.assertEqual(__UpperCAmelCase , __UpperCAmelCase )
@slow
def _snake_case ( self ):
"""simple docstring"""
for repo in ["Rocketknight1/tiny-random-falcon-7b", "Rocketknight1/tiny-random-falcon-40b"]:
lowercase_ : Tuple = AutoTokenizer.from_pretrained(__UpperCAmelCase )
lowercase_ : Tuple = FalconForCausalLM.from_pretrained(__UpperCAmelCase )
model.eval()
model.to(__UpperCAmelCase )
lowercase_ : Optional[int] = tokenizer('''My favorite food is''' , return_tensors='''pt''' ).to(__UpperCAmelCase )
# We just test that these run without errors - the models are randomly initialized
# and so the actual text outputs will be garbage
model.generate(**__UpperCAmelCase , do_sample=__UpperCAmelCase , max_new_tokens=4 )
model.generate(**__UpperCAmelCase , do_sample=__UpperCAmelCase , max_new_tokens=4 )
model.generate(**__UpperCAmelCase , num_beams=2 , max_new_tokens=4 )
@slow
def _snake_case ( self ):
"""simple docstring"""
with torch.no_grad():
for repo in [
"Rocketknight1/falcon-rw-1b",
"Rocketknight1/tiny-random-falcon-7b",
"Rocketknight1/tiny-random-falcon-40b",
]:
lowercase_ : Union[str, Any] = AutoTokenizer.from_pretrained(__UpperCAmelCase )
lowercase_ : List[Any] = FalconForCausalLM.from_pretrained(__UpperCAmelCase )
model.eval()
model.to(device=__UpperCAmelCase )
lowercase_ : Union[str, Any] = tokenizer('''My favorite food is''' , return_tensors='''pt''' ).to(__UpperCAmelCase )
# Test results are the same with and without cache
lowercase_ : List[str] = model.generate(**__UpperCAmelCase , do_sample=__UpperCAmelCase , max_new_tokens=20 , use_cache=__UpperCAmelCase )
lowercase_ : Any = model.generate(**__UpperCAmelCase , do_sample=__UpperCAmelCase , max_new_tokens=20 , use_cache=__UpperCAmelCase )
self.assertTrue((outputs_cache - outputs_no_cache).sum().item() == 0 )
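# The integration tests above reduce to the standard generate loop; for reference
# (sketch; the checkpoint name is taken from the tests — its weights are random,
# so the decoded text is garbage):
#
#     tokenizer = AutoTokenizer.from_pretrained("Rocketknight1/tiny-random-falcon-7b")
#     model = FalconForCausalLM.from_pretrained("Rocketknight1/tiny-random-falcon-7b")
#     inputs = tokenizer("My favorite food is", return_tensors="pt")
#     output_ids = model.generate(**inputs, do_sample=False, max_new_tokens=4)
#     print(tokenizer.batch_decode(output_ids)[0])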
| 93 |
'''simple docstring'''
import os
import time
import pytest
from datasets.utils.filelock import FileLock, Timeout
def test_filelock(tmpdir):
    """simple docstring"""
    locka = FileLock(str(tmpdir / "foo.lock"))
    lockb = FileLock(str(tmpdir / "foo.lock"))
    timeout = 0.01
    with locka.acquire():
        with pytest.raises(Timeout):
            _start = time.time()
            lockb.acquire(timeout)
        assert time.time() - _start > timeout
def test_long_path(tmpdir):
    """simple docstring"""
    filename = "a" * 1000 + ".lock"
    locka = FileLock(str(tmpdir / filename))
    assert locka._lock_file.endswith(".lock")
    assert not locka._lock_file.endswith(filename)
    assert len(os.path.basename(locka._lock_file)) <= 255
    lockb = FileLock(tmpdir / filename)
    with locka.acquire():
        with pytest.raises(Timeout):
            lockb.acquire(0)
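# Outside of tests, the same lock serialises writers across processes; typical
# use (sketch, paths arbitrary):
#
#     with FileLock("/tmp/demo.lock"):
#         # only one process at a time gets past this point
#         with open("/tmp/shared_output.txt", "a") as f:
#             f.write("exclusive write\n")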
| 254 | 0 |
import inspect
import os
import re
from transformers.configuration_utils import PretrainedConfig
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
PATH_TO_TRANSFORMERS = "src/transformers"
# This is to make sure the transformers module imported is the one in the repo.
transformers = direct_transformers_import(PATH_TO_TRANSFORMERS)
CONFIG_MAPPING = transformers.models.auto.configuration_auto.CONFIG_MAPPING
SPECIAL_CASES_TO_ALLOW = {
# used to compute the property `self.chunk_length`
'''EncodecConfig''': ['''overlap'''],
# used as `self.bert_model = BertModel(config, ...)`
'''DPRConfig''': True,
# not used in modeling files, but it's an important information
'''FSMTConfig''': ['''langs'''],
# used internally in the configuration class file
'''GPTNeoConfig''': ['''attention_types'''],
# used internally in the configuration class file
'''EsmConfig''': ['''is_folding_model'''],
# used during training (despite we don't have training script for these models yet)
'''Mask2FormerConfig''': ['''ignore_value'''],
# `ignore_value` used during training (despite we don't have training script for these models yet)
# `norm` used in conversion script (despite not using in the modeling file)
'''OneFormerConfig''': ['''ignore_value''', '''norm'''],
# used during preprocessing and collation, see `collating_graphormer.py`
'''GraphormerConfig''': ['''spatial_pos_max'''],
# used internally in the configuration class file
'''T5Config''': ['''feed_forward_proj'''],
# used internally in the configuration class file
# `tokenizer_class` get default value `T5Tokenizer` intentionally
'''MT5Config''': ['''feed_forward_proj''', '''tokenizer_class'''],
'''UMT5Config''': ['''feed_forward_proj''', '''tokenizer_class'''],
# used internally in the configuration class file
'''LongT5Config''': ['''feed_forward_proj'''],
# used internally in the configuration class file
'''SwitchTransformersConfig''': ['''feed_forward_proj'''],
# having default values other than `1e-5` - we can't fix them without breaking
'''BioGptConfig''': ['''layer_norm_eps'''],
# having default values other than `1e-5` - we can't fix them without breaking
'''GLPNConfig''': ['''layer_norm_eps'''],
# having default values other than `1e-5` - we can't fix them without breaking
'''SegformerConfig''': ['''layer_norm_eps'''],
# having default values other than `1e-5` - we can't fix them without breaking
'''CvtConfig''': ['''layer_norm_eps'''],
# having default values other than `1e-5` - we can't fix them without breaking
'''PerceiverConfig''': ['''layer_norm_eps'''],
# used internally to calculate the feature size
'''InformerConfig''': ['''num_static_real_features''', '''num_time_features'''],
# used internally to calculate the feature size
'''TimeSeriesTransformerConfig''': ['''num_static_real_features''', '''num_time_features'''],
# used internally to calculate the feature size
'''AutoformerConfig''': ['''num_static_real_features''', '''num_time_features'''],
# used internally to calculate `mlp_dim`
'''SamVisionConfig''': ['''mlp_ratio'''],
# For (head) training, but so far not implemented
'''ClapAudioConfig''': ['''num_classes'''],
# Not used, but providing useful information to users
'''SpeechT5HifiGanConfig''': ['''sampling_rate'''],
}
# TODO (ydshieh): Check the failing cases, try to fix them or move some cases to the above block once we are sure
SPECIAL_CASES_TO_ALLOW.update(
{
'''CLIPSegConfig''': True,
'''DeformableDetrConfig''': True,
'''DetaConfig''': True,
'''DinatConfig''': True,
'''DonutSwinConfig''': True,
'''EfficientFormerConfig''': True,
'''FSMTConfig''': True,
'''JukeboxConfig''': True,
'''LayoutLMv2Config''': True,
'''MaskFormerSwinConfig''': True,
'''MT5Config''': True,
'''NatConfig''': True,
'''OneFormerConfig''': True,
'''PerceiverConfig''': True,
'''RagConfig''': True,
'''SpeechT5Config''': True,
'''SwinConfig''': True,
'''Swin2SRConfig''': True,
'''Swinv2Config''': True,
'''SwitchTransformersConfig''': True,
'''TableTransformerConfig''': True,
'''TapasConfig''': True,
'''TransfoXLConfig''': True,
'''UniSpeechConfig''': True,
'''UniSpeechSatConfig''': True,
'''WavLMConfig''': True,
'''WhisperConfig''': True,
# TODO: @Arthur (for `alignment_head` and `alignment_layer`)
'''JukeboxPriorConfig''': True,
# TODO: @Younes (for `is_decoder`)
'''Pix2StructTextConfig''': True,
}
)
def check_attribute_being_used(config_class, attributes, default_value, source_strings):
    attribute_used = False
for attribute in attributes:
for modeling_source in source_strings:
# check if we can find `config.xxx`, `getattr(config, "xxx", ...)` or `getattr(self.config, "xxx", ...)`
if (
F'''config.{attribute}''' in modeling_source
or F'''getattr(config, "{attribute}"''' in modeling_source
or F'''getattr(self.config, "{attribute}"''' in modeling_source
):
                attribute_used = True
# Deal with multi-line cases
elif (
re.search(
                    RF'''getattr[ \t\v\n\r\f]*\([ \t\v\n\r\f]*(self\.)?config,[ \t\v\n\r\f]*"{attribute}"''' , modeling_source , )
is not None
):
                attribute_used = True
# `SequenceSummary` is called with `SequenceSummary(config)`
elif attribute in [
"summary_type",
"summary_use_proj",
"summary_activation",
"summary_last_dropout",
"summary_proj_to_labels",
"summary_first_dropout",
]:
if "SequenceSummary" in modeling_source:
                    attribute_used = True
if attribute_used:
break
if attribute_used:
break
    # common and important attributes, even if they do not always appear in the modeling files
    attributes_to_allow = [
        "bos_index",
        "eos_index",
        "pad_index",
        "unk_index",
        "mask_index",
        "image_size",
        "use_cache",
        "out_features",
        "out_indices",
    ]
    attributes_used_in_generation = ["encoder_no_repeat_ngram_size"]

    # Special cases to be allowed
    case_allowed = True
    if not attribute_used:
        case_allowed = False
        for attribute in attributes:
            # Allow if the default value in the configuration class is different from the one in `PretrainedConfig`
            if attribute in ["is_encoder_decoder"] and default_value is True:
                case_allowed = True
            elif attribute in ["tie_word_embeddings"] and default_value is False:
                case_allowed = True
            # Allow cases without checking the default value in the configuration class
            elif attribute in attributes_to_allow + attributes_used_in_generation:
                case_allowed = True
            elif attribute.endswith("_token_id"):
                case_allowed = True
        # configuration class specific cases
        if not case_allowed:
            allowed_cases = SPECIAL_CASES_TO_ALLOW.get(config_class.__name__, [])
            case_allowed = allowed_cases is True or attribute in allowed_cases

    return attribute_used or case_allowed
def check_config_attributes_being_used(config_class):
    signature = dict(inspect.signature(config_class.__init__).parameters)
    parameter_names = [x for x in list(signature.keys()) if x not in ["self", "kwargs"]]
    parameter_defaults = [signature[param].default for param in parameter_names]

    # If `attribute_map` exists, an attribute can have different names to be used in the modeling files, and as long
    # as one variant is used, the test should pass
    reversed_attribute_map = {}
    if len(config_class.attribute_map) > 0:
        reversed_attribute_map = {v: k for k, v in config_class.attribute_map.items()}

    # Get the path to modeling source files
    config_source_file = inspect.getsourcefile(config_class)
    model_dir = os.path.dirname(config_source_file)
    # Let's check against all frameworks: as long as one framework uses an attribute, we are good.
    modeling_paths = [os.path.join(model_dir, fn) for fn in os.listdir(model_dir) if fn.startswith("modeling_")]

    # Get the source code strings
    modeling_sources = []
    for path in modeling_paths:
        if os.path.isfile(path):
            with open(path) as fp:
                modeling_sources.append(fp.read())

    unused_attributes = []
    for config_param, default_value in zip(parameter_names, parameter_defaults):
        # `attributes` here is all the variant names for `config_param`
        attributes = [config_param]
        # some configuration classes have non-empty `attribute_map`, and both names could be used in the
        # corresponding modeling files. As long as one of them appears, it is fine.
        if config_param in reversed_attribute_map:
            attributes.append(reversed_attribute_map[config_param])

        if not check_attribute_being_used(config_class, attributes, default_value, modeling_sources):
            unused_attributes.append(attributes[0])

    return sorted(unused_attributes)
def check_config_attributes():
    configs_with_unused_attributes = {}
    for _config_class in list(CONFIG_MAPPING.values()):
        # Skip deprecated models
        if "models.deprecated" in _config_class.__module__:
            continue
        # Some config classes are not in `CONFIG_MAPPING` (e.g. `CLIPVisionConfig`, `Blip2VisionConfig`, etc.)
        config_classes_in_module = [
            cls
            for name, cls in inspect.getmembers(
                inspect.getmodule(_config_class),
                lambda x: inspect.isclass(x)
                and issubclass(x, PretrainedConfig)
                and inspect.getmodule(x) == inspect.getmodule(_config_class),
            )
        ]
        for config_class in config_classes_in_module:
            unused_attributes = check_config_attributes_being_used(config_class)
            if len(unused_attributes) > 0:
                configs_with_unused_attributes[config_class.__name__] = unused_attributes

    if len(configs_with_unused_attributes) > 0:
        error = "The following configuration classes contain unused attributes in the corresponding modeling files:\n"
        for name, attributes in configs_with_unused_attributes.items():
            error += f"{name}: {attributes}\n"
        raise ValueError(error)
if __name__ == "__main__":
check_config_attributes()
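
    # --- Added illustration (not part of the original script) ---
    # A minimal, self-contained check of the multi-line `getattr` regex used in
    # `check_attribute_being_used`; the source string and attribute name are made up.
    import re as _re

    _sample = 'getattr(\n    self.config,\n    "hidden_size",\n)'
    _pattern = r'getattr[ \t\v\n\r\f]*\([ \t\v\n\r\f]*(self\.)?config,[ \t\v\n\r\f]*"hidden_size"'
    assert _re.search(_pattern, _sample) is not None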
| 36 |
import shutil
import tempfile
import unittest
from unittest.mock import patch
from transformers import (
DefaultFlowCallback,
IntervalStrategy,
PrinterCallback,
ProgressCallback,
Trainer,
TrainerCallback,
TrainingArguments,
is_torch_available,
)
from transformers.testing_utils import require_torch
if is_torch_available():
from transformers.trainer import DEFAULT_CALLBACKS
from .test_trainer import RegressionDataset, RegressionModelConfig, RegressionPreTrainedModel
class MyTestTrainerCallback(TrainerCallback):
    "A callback that registers the events that goes through."

    def __init__(self):
        self.events = []

    def on_init_end(self, args, state, control, **kwargs):
        self.events.append("on_init_end")

    def on_train_begin(self, args, state, control, **kwargs):
        self.events.append("on_train_begin")

    def on_train_end(self, args, state, control, **kwargs):
        self.events.append("on_train_end")

    def on_epoch_begin(self, args, state, control, **kwargs):
        self.events.append("on_epoch_begin")

    def on_epoch_end(self, args, state, control, **kwargs):
        self.events.append("on_epoch_end")

    def on_step_begin(self, args, state, control, **kwargs):
        self.events.append("on_step_begin")

    def on_step_end(self, args, state, control, **kwargs):
        self.events.append("on_step_end")

    def on_evaluate(self, args, state, control, **kwargs):
        self.events.append("on_evaluate")

    def on_predict(self, args, state, control, **kwargs):
        self.events.append("on_predict")

    def on_save(self, args, state, control, **kwargs):
        self.events.append("on_save")

    def on_log(self, args, state, control, **kwargs):
        self.events.append("on_log")

    def on_prediction_step(self, args, state, control, **kwargs):
        self.events.append("on_prediction_step")
@require_torch
class TrainerCallbackTest(unittest.TestCase):
    def setUp(self):
        self.output_dir = tempfile.mkdtemp()

    def tearDown(self):
        shutil.rmtree(self.output_dir)

    def get_trainer(self, a=0, b=0, train_len=64, eval_len=64, callbacks=None, disable_tqdm=False, **kwargs):
        # disable_tqdm in TrainingArguments has a flaky default since it depends on the level of logging. We make sure
        # its set to False since the tests later on depend on its value.
        train_dataset = RegressionDataset(length=train_len)
        eval_dataset = RegressionDataset(length=eval_len)
        config = RegressionModelConfig(a=a, b=b)
        model = RegressionPreTrainedModel(config)

        args = TrainingArguments(self.output_dir, disable_tqdm=disable_tqdm, report_to=[], **kwargs)
        return Trainer(
            model,
            args,
            train_dataset=train_dataset,
            eval_dataset=eval_dataset,
            callbacks=callbacks,
        )

    def check_callbacks_equality(self, cbs1, cbs2):
        self.assertEqual(len(cbs1), len(cbs2))

        # Order doesn't matter
        cbs1 = sorted(cbs1, key=lambda cb: cb.__name__ if isinstance(cb, type) else cb.__class__.__name__)
        cbs2 = sorted(cbs2, key=lambda cb: cb.__name__ if isinstance(cb, type) else cb.__class__.__name__)

        for cb1, cb2 in zip(cbs1, cbs2):
            if isinstance(cb1, type) and isinstance(cb2, type):
                self.assertEqual(cb1, cb2)
            elif isinstance(cb1, type) and not isinstance(cb2, type):
                self.assertEqual(cb1, cb2.__class__)
            elif not isinstance(cb1, type) and isinstance(cb2, type):
                self.assertEqual(cb1.__class__, cb2)
            else:
                self.assertEqual(cb1, cb2)
    def get_expected_events(self, trainer):
        expected_events = ["on_init_end", "on_train_begin"]
        step = 0
        train_dl_len = len(trainer.get_eval_dataloader())
        evaluation_events = ["on_prediction_step"] * len(trainer.get_eval_dataloader()) + ["on_log", "on_evaluate"]
        for _ in range(trainer.state.num_train_epochs):
            expected_events.append("on_epoch_begin")
            for _ in range(train_dl_len):
                step += 1
                expected_events += ["on_step_begin", "on_step_end"]
                if step % trainer.args.logging_steps == 0:
                    expected_events.append("on_log")
                if trainer.args.evaluation_strategy == IntervalStrategy.STEPS and step % trainer.args.eval_steps == 0:
                    expected_events += evaluation_events.copy()
                if step % trainer.args.save_steps == 0:
                    expected_events.append("on_save")
            expected_events.append("on_epoch_end")
            if trainer.args.evaluation_strategy == IntervalStrategy.EPOCH:
                expected_events += evaluation_events.copy()
        expected_events += ["on_log", "on_train_end"]
        return expected_events
    def test_init_callback(self):
        trainer = self.get_trainer()
        expected_callbacks = DEFAULT_CALLBACKS.copy() + [ProgressCallback]
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        # Callbacks passed at init are added to the default callbacks
        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback])
        expected_callbacks.append(MyTestTrainerCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        # TrainingArguments.disable_tqdm controls if use ProgressCallback or PrinterCallback
        trainer = self.get_trainer(disable_tqdm=True)
        expected_callbacks = DEFAULT_CALLBACKS.copy() + [PrinterCallback]
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)
    def test_add_remove_callback(self):
        expected_callbacks = DEFAULT_CALLBACKS.copy() + [ProgressCallback]
        trainer = self.get_trainer()

        # We can add, pop, or remove by class name
        trainer.remove_callback(DefaultFlowCallback)
        expected_callbacks.remove(DefaultFlowCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        trainer = self.get_trainer()
        cb = trainer.pop_callback(DefaultFlowCallback)
        self.assertEqual(cb.__class__, DefaultFlowCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        trainer.add_callback(DefaultFlowCallback)
        expected_callbacks.insert(0, DefaultFlowCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        # We can also add, pop, or remove by instance
        trainer = self.get_trainer()
        cb = trainer.callback_handler.callbacks[0]
        trainer.remove_callback(cb)
        expected_callbacks.remove(DefaultFlowCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        trainer = self.get_trainer()
        cb1 = trainer.callback_handler.callbacks[0]
        cb2 = trainer.pop_callback(cb1)
        self.assertEqual(cb1, cb2)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        trainer.add_callback(cb1)
        expected_callbacks.insert(0, DefaultFlowCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)
    def test_event_flow(self):
        import warnings

        # XXX: for now ignore scatter_gather warnings in this test since it's not relevant to what's being tested
        warnings.simplefilter(action="ignore", category=UserWarning)

        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback])
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        # Independent log/save/eval
        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback], logging_steps=5)
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback], save_steps=5)
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback], eval_steps=5, evaluation_strategy="steps")
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback], evaluation_strategy="epoch")
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        # A bit of everything
        trainer = self.get_trainer(
            callbacks=[MyTestTrainerCallback],
            logging_steps=3,
            save_steps=10,
            eval_steps=5,
            evaluation_strategy="steps",
        )
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        # warning should be emitted for duplicated callbacks
        with patch("transformers.trainer_callback.logger.warning") as warn_mock:
            trainer = self.get_trainer(
                callbacks=[MyTestTrainerCallback, MyTestTrainerCallback],
            )
            assert str(MyTestTrainerCallback) in warn_mock.call_args[0][0]
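
# Added usage sketch (not part of the test file): attaching the event-recording
# callback to a real `Trainer` outside of unittest would look roughly like this;
# `my_model`, `my_args`, and `my_dataset` are hypothetical placeholders.
#
#   recorder = MyTestTrainerCallback()
#   trainer = Trainer(my_model, my_args, train_dataset=my_dataset, callbacks=[recorder])
#   trainer.train()
#   print(recorder.events)  # e.g. ["on_init_end", "on_train_begin", "on_epoch_begin", ...]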
| 36 | 1 |
def print_pascal_triangle(num_rows: int) -> None:
    """Print Pascal's triangle for the given number of rows."""
    triangle = generate_pascal_triangle(num_rows)
    for row_idx in range(num_rows):
        # Print left spaces
        for _ in range(num_rows - row_idx - 1):
            print(end=" ")
        # Print row values
        for col_idx in range(row_idx + 1):
            if col_idx != row_idx:
                print(triangle[row_idx][col_idx], end=" ")
            else:
                print(triangle[row_idx][col_idx], end="")
        print()
def generate_pascal_triangle(num_rows: int) -> list[list[int]]:
    if not isinstance(num_rows, int):
        raise TypeError("The input value of 'num_rows' should be 'int'")

    if num_rows == 0:
        return []
    elif num_rows < 0:
        raise ValueError(
            "The input value of 'num_rows' should be greater than or equal to 0")

    triangle: list[list[int]] = []
    for current_row_idx in range(num_rows):
        current_row = populate_current_row(triangle, current_row_idx)
        triangle.append(current_row)
    return triangle
def populate_current_row(triangle: list[list[int]], current_row_idx: int) -> list[int]:
    current_row = [-1] * (current_row_idx + 1)
    # first and last elements of current row are equal to 1
    current_row[0], current_row[-1] = 1, 1
    for current_col_idx in range(1, current_row_idx):
        calculate_current_element(
            triangle, current_row, current_row_idx, current_col_idx)
    return current_row
def calculate_current_element(
    triangle: list[list[int]], current_row: list[int], current_row_idx: int, current_col_idx: int,
) -> None:
    above_to_left_elt = triangle[current_row_idx - 1][current_col_idx - 1]
    above_to_right_elt = triangle[current_row_idx - 1][current_col_idx]
    current_row[current_col_idx] = above_to_left_elt + above_to_right_elt
def generate_pascal_triangle_optimized(num_rows: int) -> list[list[int]]:
    if not isinstance(num_rows, int):
        raise TypeError("The input value of 'num_rows' should be 'int'")

    if num_rows == 0:
        return []
    elif num_rows < 0:
        raise ValueError(
            "The input value of 'num_rows' should be greater than or equal to 0")

    result: list[list[int]] = [[1]]

    for row_index in range(1, num_rows):
        temp_row = [0] + result[-1] + [0]
        row_length = row_index + 1
        # Calculate the number of distinct elements in a row
        distinct_elements = sum(divmod(row_length, 2))
        row_first_half = [
            temp_row[i - 1] + temp_row[i] for i in range(1, distinct_elements + 1)
        ]
        row_second_half = row_first_half[: (row_index + 1) // 2]
        row_second_half.reverse()
        row = row_first_half + row_second_half
        result.append(row)

    return result
def benchmark() -> None:
    """Benchmark both triangle generators over several input sizes."""
    from collections.abc import Callable
    from timeit import timeit

    def benchmark_a_function(func: Callable, value: int) -> None:
        call = f"{func.__name__}({value})"
        timing = timeit(f"__main__.{call}", setup="import __main__")
        # print(f"{call:38} = {func(value)} -- {timing:.4f} seconds")
        print(f"{call:38} -- {timing:.4f} seconds")

    for value in range(15):  # (1, 7, 14):
        for func in (generate_pascal_triangle, generate_pascal_triangle_optimized):
            benchmark_a_function(func, value)
        print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
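
    # Added sanity check (illustrative): both generators above should agree.
    for _n in range(8):
        assert generate_pascal_triangle(_n) == generate_pascal_triangle_optimized(_n)
    print(generate_pascal_triangle(5))  # [[1], [1, 1], [1, 2, 1], [1, 3, 3, 1], [1, 4, 6, 4, 1]]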
| 300 |
import torch
import torch.nn as nn
from transformers import CLIPConfig, CLIPVisionModel, PreTrainedModel
from ...utils import logging
logger = logging.get_logger(__name__)
def cosine_distance(image_embeds, text_embeds):
    normalized_image_embeds = nn.functional.normalize(image_embeds)
    normalized_text_embeds = nn.functional.normalize(text_embeds)
    return torch.mm(normalized_image_embeds, normalized_text_embeds.t())
class StableDiffusionSafetyChecker(PreTrainedModel):
    config_class = CLIPConfig

    _no_split_modules = ["CLIPEncoderLayer"]

    def __init__(self, config: CLIPConfig):
        super().__init__(config)

        self.vision_model = CLIPVisionModel(config.vision_config)
        self.visual_projection = nn.Linear(config.vision_config.hidden_size, config.projection_dim, bias=False)

        self.concept_embeds = nn.Parameter(torch.ones(17, config.projection_dim), requires_grad=False)
        self.special_care_embeds = nn.Parameter(torch.ones(3, config.projection_dim), requires_grad=False)

        self.concept_embeds_weights = nn.Parameter(torch.ones(17), requires_grad=False)
        self.special_care_embeds_weights = nn.Parameter(torch.ones(3), requires_grad=False)
    @torch.no_grad()
    def forward(self, clip_input, images):
        pooled_output = self.vision_model(clip_input)[1]  # pooled_output
        image_embeds = self.visual_projection(pooled_output)

        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
        special_cos_dist = cosine_distance(image_embeds, self.special_care_embeds).cpu().float().numpy()
        cos_dist = cosine_distance(image_embeds, self.concept_embeds).cpu().float().numpy()

        result = []
        batch_size = image_embeds.shape[0]
        for i in range(batch_size):
            result_img = {"special_scores": {}, "special_care": [], "concept_scores": {}, "bad_concepts": []}

            # increase this value to create a stronger `nfsw` filter
            # at the cost of increasing the possibility of filtering benign images
            adjustment = 0.0

            for concept_idx in range(len(special_cos_dist[0])):
                concept_cos = special_cos_dist[i][concept_idx]
                concept_threshold = self.special_care_embeds_weights[concept_idx].item()
                result_img["special_scores"][concept_idx] = round(concept_cos - concept_threshold + adjustment, 3)
                if result_img["special_scores"][concept_idx] > 0:
                    result_img["special_care"].append({concept_idx, result_img["special_scores"][concept_idx]})
                    adjustment = 0.01

            for concept_idx in range(len(cos_dist[0])):
                concept_cos = cos_dist[i][concept_idx]
                concept_threshold = self.concept_embeds_weights[concept_idx].item()
                result_img["concept_scores"][concept_idx] = round(concept_cos - concept_threshold + adjustment, 3)
                if result_img["concept_scores"][concept_idx] > 0:
                    result_img["bad_concepts"].append(concept_idx)

            result.append(result_img)

        has_nsfw_concepts = [len(res["bad_concepts"]) > 0 for res in result]

        return images, has_nsfw_concepts
    @torch.no_grad()
    def forward_onnx(self, clip_input: torch.FloatTensor, images: torch.FloatTensor):
        pooled_output = self.vision_model(clip_input)[1]  # pooled_output
        image_embeds = self.visual_projection(pooled_output)

        special_cos_dist = cosine_distance(image_embeds, self.special_care_embeds)
        cos_dist = cosine_distance(image_embeds, self.concept_embeds)

        # increase this value to create a stronger `nsfw` filter
        # at the cost of increasing the possibility of filtering benign images
        adjustment = 0.0

        special_scores = special_cos_dist - self.special_care_embeds_weights + adjustment
        # special_scores = special_scores.round(decimals=3)
        special_care = torch.any(special_scores > 0, dim=1)
        special_adjustment = special_care * 0.01
        special_adjustment = special_adjustment.unsqueeze(1).expand(-1, cos_dist.shape[1])

        concept_scores = (cos_dist - self.concept_embeds_weights) + special_adjustment
        # concept_scores = concept_scores.round(decimals=3)
        has_nsfw_concepts = torch.any(concept_scores > 0, dim=1)

        return images, has_nsfw_concepts
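

# Added illustration (not in the original module): `cosine_distance` returns
# pairwise cosine similarities, so identical embeddings score ~1.0 on the diagonal.
if __name__ == "__main__":
    _embeds = torch.randn(2, 8)
    _sims = cosine_distance(_embeds, _embeds)
    assert _sims.shape == (2, 2)
    assert torch.allclose(_sims.diag(), torch.ones(2), atol=1e-5)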
| 300 | 1 |
"""simple docstring"""
import platform
from argparse import ArgumentParser
import huggingface_hub
from .. import __version__ as version
from ..utils import is_accelerate_available, is_torch_available, is_transformers_available, is_xformers_available
from . import BaseDiffusersCLICommand
def info_command_factory(_):
    return EnvironmentCommand()


class EnvironmentCommand(BaseDiffusersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        download_parser = parser.add_parser("env")
        download_parser.set_defaults(func=info_command_factory)
    def run(self):
        hub_version = huggingface_hub.__version__

        pt_version = "not installed"
        pt_cuda_available = "NA"
        if is_torch_available():
            import torch

            pt_version = torch.__version__
            pt_cuda_available = torch.cuda.is_available()

        transformers_version = "not installed"
        if is_transformers_available():
            import transformers

            transformers_version = transformers.__version__

        accelerate_version = "not installed"
        if is_accelerate_available():
            import accelerate

            accelerate_version = accelerate.__version__

        xformers_version = "not installed"
        if is_xformers_available():
            import xformers

            xformers_version = xformers.__version__

        info = {
            "`diffusers` version": version,
            "Platform": platform.platform(),
            "Python version": platform.python_version(),
            "PyTorch version (GPU?)": f"{pt_version} ({pt_cuda_available})",
            "Huggingface_hub version": hub_version,
            "Transformers version": transformers_version,
            "Accelerate version": accelerate_version,
            "xFormers version": xformers_version,
            "Using GPU in script?": "<fill in>",
            "Using distributed or parallel set-up in script?": "<fill in>",
        }

        print("\nCopy-and-paste the text below in your GitHub issue and FILL OUT the two last points.\n")
        print(self.format_dict(info))

        return info
    @staticmethod
    def format_dict(d):
        return "\n".join([f"- {prop}: {val}" for prop, val in d.items()]) + "\n"
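

# Added usage sketch (not part of diffusers): wiring the command into an argument
# parser, roughly mirroring what the `diffusers-cli env` entry point does.
if __name__ == "__main__":
    _parser = ArgumentParser("diffusers-cli")
    _subparsers = _parser.add_subparsers()
    EnvironmentCommand.register_subcommand(_subparsers)
    _args = _parser.parse_args(["env"])
    _args.func(_args).run()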
| 365 |
"""simple docstring"""
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast
from ...utils import logging
logger = logging.get_logger(__name__)

GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "EleutherAI/gpt-neo-1.3B": "https://huggingface.co/EleutherAI/gpt-neo-1.3B/resolve/main/config.json",
    # See all GPTNeo models at https://huggingface.co/models?filter=gpt_neo
}
class GPTNeoConfig(PretrainedConfig):
    model_type = "gpt_neo"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "num_heads", "num_hidden_layers": "num_layers"}

    def __init__(
        self,
        vocab_size=50257,
        max_position_embeddings=2048,
        hidden_size=2048,
        num_layers=24,
        attention_types=[[["global", "local"], 12]],
        num_heads=16,
        intermediate_size=None,
        window_size=256,
        activation_function="gelu_new",
        resid_dropout=0.0,
        embed_dropout=0.0,
        attention_dropout=0.0,
        classifier_dropout=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        use_cache=True,
        bos_token_id=50256,
        eos_token_id=50256,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_layers = num_layers
        self.num_heads = num_heads
        self.intermediate_size = intermediate_size
        self.window_size = window_size
        self.activation_function = activation_function
        self.resid_dropout = resid_dropout
        self.embed_dropout = embed_dropout
        self.attention_dropout = attention_dropout
        self.classifier_dropout = classifier_dropout
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache

        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

        self.attention_types = attention_types
        self.attention_layers = self.expand_attention_types_params(attention_types)

        if len(self.attention_layers) != self.num_layers:
            raise ValueError(
                "Configuration for convolutional module is incorrect. "
                "It is required that `len(config.attention_layers)` == `config.num_layers` "
                f"but is `len(config.attention_layers) = {len(self.attention_layers)}`, "
                f"`config.num_layers = {self.num_layers}`. "
                "`config.attention_layers` is prepared using `config.attention_types`. "
                "Please verify the value of `config.attention_types` argument."
            )

        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
    @staticmethod
    def expand_attention_types_params(attention_types):
        attentions = []
        for item in attention_types:
            for _ in range(item[1]):
                attentions.extend(item[0])
        return attentions
def custom_unfold(input, dimension, size, step):
    """Custom torch.Tensor.unfold implementation to enable the export to ONNX."""
    import torch

    shape = input.size()
    rank = len(shape)
    sizedim = shape[dimension]

    low_indices = torch.arange(0, sizedim, step)
    min_length = torch.div(sizedim - size, step, rounding_mode="floor") + 1
    indices = torch.arange(size) + low_indices[:min_length][:, None]

    s = [slice(None)] * rank
    s[dimension] = indices
    sliced = input[s]

    perm = list(range(0, rank + 1))
    perm.append(perm.pop(dimension + 1))

    return sliced.permute(perm)
def custom_get_block_length_and_num_blocks(seq_length, window_size):
    """Custom implementation to enable the export to ONNX, as the original uses Python control flow."""
    import torch

    candidates = torch.arange(1, window_size)
    remainders = torch.remainder(seq_length, candidates)
    divisor_indices = remainders == 0
    divisors = candidates[divisor_indices]
    largest_divisor = torch.max(divisors)
    return largest_divisor, torch.div(seq_length, largest_divisor, rounding_mode="floor")
class GPTNeoOnnxConfig(OnnxConfigWithPast):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}})
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")
            common_inputs["attention_mask"] = {0: "batch", 1: "past_sequence + sequence"}
        else:
            common_inputs["attention_mask"] = {0: "batch", 1: "sequence"}

        return common_inputs

    @property
    def num_attention_heads(self) -> int:
        return self._config.num_heads
    def generate_dummy_inputs(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        common_inputs = super(OnnxConfigWithPast, self).generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )

        # We need to order the input in the way they appears in the forward()
        ordered_inputs = OrderedDict({"input_ids": common_inputs["input_ids"]})

        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch

                batch, seqlen = common_inputs["input_ids"].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                past_shape = (
                    batch,
                    self.num_attention_heads,
                    past_key_values_length,
                    self._config.hidden_size // self.num_attention_heads,
                )
                ordered_inputs["past_key_values"] = [
                    (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(self.num_layers)
                ]

        ordered_inputs["attention_mask"] = common_inputs["attention_mask"]
        if self.use_past:
            mask_dtype = ordered_inputs["attention_mask"].dtype
            ordered_inputs["attention_mask"] = torch.cat(
                [ordered_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )

        return ordered_inputs
    @property
    def default_onnx_opset(self) -> int:
        return 13
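

# Added equivalence check (illustrative, not in the original file): `custom_unfold`
# is meant to reproduce `torch.Tensor.unfold` in an ONNX-exportable way.
if __name__ == "__main__":
    import torch

    _x = torch.arange(24).reshape(2, 12)
    assert torch.equal(custom_unfold(_x, 1, 4, 2), _x.unfold(1, 4, 2))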
| 161 | 0 |
from .constants import (
MODEL_NAME,
OPTIMIZER_NAME,
RNG_STATE_NAME,
SAFE_WEIGHTS_INDEX_NAME,
SAFE_WEIGHTS_NAME,
SCALER_NAME,
SCHEDULER_NAME,
TORCH_LAUNCH_PARAMS,
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
)
from .dataclasses import (
BnbQuantizationConfig,
ComputeEnvironment,
CustomDtype,
DeepSpeedPlugin,
DistributedDataParallelKwargs,
DistributedType,
DynamoBackend,
    FP8RecipeKwargs,
FullyShardedDataParallelPlugin,
GradientAccumulationPlugin,
GradScalerKwargs,
InitProcessGroupKwargs,
KwargsHandler,
LoggerType,
MegatronLMPlugin,
PrecisionType,
ProjectConfiguration,
RNGType,
SageMakerDistributedType,
TensorInformation,
TorchDynamoPlugin,
)
from .environment import get_int_from_env, parse_choice_from_env, parse_flag_from_env
from .imports import (
get_ccl_version,
    is_4bit_bnb_available,
    is_8bit_bnb_available,
is_aim_available,
    is_bf16_available,
is_bnb_available,
    is_boto3_available,
is_ccl_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
    is_fp8_available,
is_ipex_available,
is_megatron_lm_available,
is_mlflow_available,
is_mps_available,
is_npu_available,
is_rich_available,
is_safetensors_available,
is_sagemaker_available,
is_tensorboard_available,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
from .modeling import (
check_device_map,
check_tied_parameters_in_config,
check_tied_parameters_on_same_device,
compute_module_sizes,
convert_file_size_to_int,
dtype_byte_size,
find_tied_parameters,
get_balanced_memory,
get_max_layer_size,
get_max_memory,
get_mixed_precision_context_manager,
id_tensor_storage,
infer_auto_device_map,
load_checkpoint_in_model,
load_offloaded_weights,
load_state_dict,
named_module_tensors,
retie_parameters,
set_module_tensor_to_device,
shard_checkpoint,
)
from .offload import (
OffloadedWeightsLoader,
PrefixedDataset,
extract_submodules_state_dict,
load_offloaded_weight,
offload_state_dict,
offload_weight,
save_offload_index,
)
from .operations import (
broadcast,
broadcast_object_list,
concatenate,
    convert_outputs_to_fp32,
    convert_to_fp32,
find_batch_size,
find_device,
gather,
gather_object,
get_data_structure,
honor_type,
initialize_tensors,
is_namedtuple,
is_tensor_information,
is_torch_tensor,
listify,
pad_across_processes,
recursively_apply,
reduce,
send_to_device,
slice_tensors,
)
from .versions import compare_versions, is_torch_version
if is_deepspeed_available():
from .deepspeed import (
DeepSpeedEngineWrapper,
DeepSpeedOptimizerWrapper,
DeepSpeedSchedulerWrapper,
DummyOptim,
DummyScheduler,
HfDeepSpeedConfig,
)
from .bnb import has_4bit_bnb_layers, load_and_quantize_model
from .fsdp_utils import load_fsdp_model, load_fsdp_optimizer, save_fsdp_model, save_fsdp_optimizer
from .launch import (
PrepareForLaunch,
_filter_args,
prepare_deepspeed_cmd_env,
prepare_multi_gpu_env,
prepare_sagemager_args_inputs,
prepare_simple_launcher_cmd_env,
prepare_tpu,
)
from .megatron_lm import (
AbstractTrainStep,
BertTrainStep,
GPTTrainStep,
MegatronEngine,
MegatronLMDummyDataLoader,
MegatronLMDummyScheduler,
MegatronLMOptimizerWrapper,
MegatronLMSchedulerWrapper,
    T5TrainStep,
avg_losses_across_data_parallel_group,
gather_across_data_parallel_groups,
)
from .megatron_lm import initialize as megatron_lm_initialize
from .megatron_lm import prepare_data_loader as megatron_lm_prepare_data_loader
from .megatron_lm import prepare_model as megatron_lm_prepare_model
from .megatron_lm import prepare_optimizer as megatron_lm_prepare_optimizer
from .megatron_lm import prepare_scheduler as megatron_lm_prepare_scheduler
from .memory import find_executable_batch_size, release_memory
from .other import (
extract_model_from_parallel,
get_pretty_name,
is_port_in_use,
merge_dicts,
patch_environment,
save,
wait_for_everyone,
write_basic_config,
)
from .random import set_seed, synchronize_rng_state, synchronize_rng_states
from .torch_xla import install_xla
from .tqdm import tqdm
from .transformer_engine import convert_model, has_transformer_engine_layers
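
# Added usage sketch (not part of the original __init__): `find_executable_batch_size`,
# re-exported above, retries the wrapped function with a halved batch size on CUDA OOM.
if __name__ == "__main__":

    @find_executable_batch_size(starting_batch_size=128)
    def _train(batch_size):
        print(f"trying batch_size={batch_size}")

    _train()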
| 138 |
import argparse
import pytorch_lightning as pl
import torch
from torch import nn
from transformers import LongformerForQuestionAnswering, LongformerModel
class LightningModel(pl.LightningModule):
    def __init__(self, model):
        super().__init__()
        self.model = model
        self.num_labels = 2
        self.qa_outputs = nn.Linear(self.model.config.hidden_size, self.num_labels)

    # implemented only because lightning requires it
    def forward(self):
        pass
def convert_longformer_qa_checkpoint_to_pytorch(
    longformer_model: str, longformer_question_answering_ckpt_path: str, pytorch_dump_folder_path: str
):
    # load longformer model from model identifier
    longformer = LongformerModel.from_pretrained(longformer_model)
    lightning_model = LightningModel(longformer)

    ckpt = torch.load(longformer_question_answering_ckpt_path, map_location=torch.device("cpu"))
    lightning_model.load_state_dict(ckpt["state_dict"])

    # init longformer question answering model
    longformer_for_qa = LongformerForQuestionAnswering.from_pretrained(longformer_model)

    # transfer weights
    longformer_for_qa.longformer.load_state_dict(lightning_model.model.state_dict())
    longformer_for_qa.qa_outputs.load_state_dict(lightning_model.qa_outputs.state_dict())
    longformer_for_qa.eval()

    # save model
    longformer_for_qa.save_pretrained(pytorch_dump_folder_path)

    print(f"Conversion successful. Model saved under {pytorch_dump_folder_path}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--longformer_model",
default=None,
type=str,
required=True,
help="model identifier of longformer. Should be either `longformer-base-4096` or `longformer-large-4096`.",
)
parser.add_argument(
"--longformer_question_answering_ckpt_path",
default=None,
type=str,
required=True,
help="Path the official PyTorch Lightning Checkpoint.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
    args = parser.parse_args()
convert_longformer_qa_checkpoint_to_pytorch(
args.longformer_model, args.longformer_question_answering_ckpt_path, args.pytorch_dump_folder_path
)
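
# Added example invocation (illustrative; the script name and paths are placeholders):
#   python convert_longformer_qa_checkpoint.py \
#       --longformer_model longformer-base-4096 \
#       --longformer_question_answering_ckpt_path ./qa_checkpoint.ckpt \
#       --pytorch_dump_folder_path ./converted_model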
| 231 | 0 |
import math
import tensorflow as tf
from packaging import version
def _gelu(x):
    x = tf.convert_to_tensor(x)
    cdf = 0.5 * (1.0 + tf.math.erf(x / tf.cast(tf.sqrt(2.0), x.dtype)))

    return x * cdf


def _gelu_new(x):
    x = tf.convert_to_tensor(x)
    pi = tf.cast(math.pi, x.dtype)
    coeff = tf.cast(0.044715, x.dtype)
    cdf = 0.5 * (1.0 + tf.tanh(tf.sqrt(2.0 / pi) * (x + coeff * tf.pow(x, 3))))

    return x * cdf


def mish(x):
    x = tf.convert_to_tensor(x)

    return x * tf.tanh(tf.math.softplus(x))


def gelu_fast(x):
    x = tf.convert_to_tensor(x)
    coeff1 = tf.cast(0.044715, x.dtype)
    coeff2 = tf.cast(0.7978845608, x.dtype)

    return 0.5 * x * (1.0 + tf.tanh(x * coeff2 * (1.0 + coeff1 * x * x)))


def quick_gelu(x):
    x = tf.convert_to_tensor(x)
    coeff = tf.cast(1.702, x.dtype)

    return x * tf.math.sigmoid(coeff * x)


def gelu_10(x):
    """Clip the range of possible GeLU outputs between [-10, 10]."""
    return tf.clip_by_value(_gelu(x), -10, 10)


def glu(x, axis=-1):
    """Gated Linear Unit: split `x` in two halves a, b along `axis` and return a * sigmoid(b)."""
    a, b = tf.split(x, 2, axis=axis)

    return a * tf.math.sigmoid(b)
if version.parse(tf.version.VERSION) >= version.parse("2.4"):
def A ( lowercase ) -> Dict:
'''simple docstring'''
return tf.keras.activations.gelu(lowercase , approximate=lowercase )
_UpperCAmelCase : List[str] = tf.keras.activations.gelu
_UpperCAmelCase : List[Any] = approximate_gelu_wrap
else:
_UpperCAmelCase : List[Any] = _gelu
_UpperCAmelCase : Any = _gelu_new
_UpperCAmelCase : Tuple = {
"gelu": gelu,
"gelu_10": gelu_aa,
"gelu_fast": gelu_fast,
"gelu_new": gelu_new,
"glu": glu,
"mish": mish,
"quick_gelu": quick_gelu,
"relu": tf.keras.activations.relu,
"sigmoid": tf.keras.activations.sigmoid,
"silu": tf.keras.activations.swish,
"swish": tf.keras.activations.swish,
"tanh": tf.keras.activations.tanh,
}
def A ( lowercase ) -> Optional[Any]:
'''simple docstring'''
if activation_string in ACTaFN:
return ACTaFN[activation_string]
else:
raise KeyError(f'''function {activation_string} not found in ACT2FN mapping {list(ACTaFN.keys() )}''' )
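

# Added numerical sanity check (illustrative): the exact (erf) and tanh-approximate
# GELUs should agree to within ~1e-2 on moderate inputs.
if __name__ == "__main__":
    _x = tf.linspace(-3.0, 3.0, 7)
    _max_diff = tf.reduce_max(tf.abs(_gelu(_x) - _gelu_new(_x)))
    assert float(_max_diff) < 1e-2
    print(get_tf_activation("gelu_fast")(_x))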
| 110 |
from typing import Optional, Union
import torch
from torch import nn
from ...configuration_utils import ConfigMixin, register_to_config
from ...models.modeling_utils import ModelMixin
class StableUnCLIPImageNormalizer(ModelMixin, ConfigMixin):
    """
    Holds the mean and standard deviation of the CLIP embedder used in stable unCLIP.
    """

    @register_to_config
    def __init__(
        self,
        embedding_dim: int = 768,
    ):
        super().__init__()

        self.mean = nn.Parameter(torch.zeros(1, embedding_dim))
        self.std = nn.Parameter(torch.ones(1, embedding_dim))

    def to(
        self,
        torch_device: Optional[Union[str, torch.device]] = None,
        torch_dtype: Optional[torch.dtype] = None,
    ):
        self.mean = nn.Parameter(self.mean.to(torch_device).to(torch_dtype))
        self.std = nn.Parameter(self.std.to(torch_device).to(torch_dtype))

        return self

    def scale(self, embeds):
        embeds = (embeds - self.mean) * 1.0 / self.std
        return embeds

    def unscale(self, embeds):
        embeds = (embeds * self.std) + self.mean
        return embeds
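

# Added round-trip illustration (not in the original module): `unscale(scale(x))`
# recovers the input, since the two transforms are exact inverses.
if __name__ == "__main__":
    _normalizer = StableUnCLIPImageNormalizer(embedding_dim=8)
    _embeds = torch.randn(4, 8)
    assert torch.allclose(_normalizer.unscale(_normalizer.scale(_embeds)), _embeds, atol=1e-6)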
| 110 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {"configuration_vit_msn": ["VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP", "ViTMSNConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_vit_msn"] = [
        "VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ViTMSNModel",
        "ViTMSNForImageClassification",
        "ViTMSNPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_vit_msn import VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMSNConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit_msn import (
VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTMSNForImageClassification,
ViTMSNModel,
ViTMSNPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
'''simple docstring'''
import numpy as np
import pandas as pd
from sklearn.preprocessing import MinMaxScaler
from tensorflow.keras.layers import LSTM, Dense
from tensorflow.keras.models import Sequential
if __name__ == "__main__":
    df = pd.read_csv("sample_data.csv", header=None)
    len_data = df.shape[:1][0]
    # If you're using some other dataset input the target column
    actual_data = df.iloc[:, 1:2]
    actual_data = actual_data.values.reshape(len_data, 1)
    actual_data = MinMaxScaler().fit_transform(actual_data)
    look_back = 10
    forward_days = 5
    periods = 20
    division = len_data - periods * look_back
    train_data = actual_data[:division]
    test_data = actual_data[division - look_back :]
    train_x, train_y = [], []
    test_x, test_y = [], []

    for i in range(0, len(train_data) - forward_days - look_back + 1):
        train_x.append(train_data[i : i + look_back])
        train_y.append(train_data[i + look_back : i + look_back + forward_days])
    for i in range(0, len(test_data) - forward_days - look_back + 1):
        test_x.append(test_data[i : i + look_back])
        test_y.append(test_data[i + look_back : i + look_back + forward_days])
    x_train = np.array(train_x)
    x_test = np.array(test_x)
    y_train = np.array([list(i.ravel()) for i in train_y])
    y_test = np.array([list(i.ravel()) for i in test_y])

    model = Sequential()
    model.add(LSTM(128, input_shape=(look_back, 1), return_sequences=True))
    model.add(LSTM(64, input_shape=(128, 1)))
    model.add(Dense(forward_days))
    model.compile(loss="mean_squared_error", optimizer="adam")
    history = model.fit(x_train, y_train, epochs=150, verbose=1, shuffle=True, batch_size=4)
    pred = model.predict(x_test)
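
    # Added shape illustration (not in the original script): each sample is a
    # look_back-long window; each target holds the next forward_days values.
    print(x_train.shape)  # (n_samples, look_back, 1)
    print(y_train.shape)  # (n_samples, forward_days)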
from __future__ import annotations
from decimal import Decimal
from math import * # noqa: F403
from sympy import diff
def newton_raphson(func: str, a: float | Decimal, precision: float = 10**-10) -> float:
    """Finds the root of `func` starting from the point `a` by the Newton-Raphson method."""
    x = a
    while True:
        x = Decimal(x) - (
            Decimal(eval(func)) / Decimal(eval(str(diff(func))))  # noqa: S307
        )
        # This number dictates the accuracy of the answer
        if abs(eval(func)) < precision:  # noqa: S307
            return float(x)
if __name__ == "__main__":
# Find root of trigonometric function
# Find value of pi
print(F"The root of sin(x) = 0 is {newton_raphson('sin(x)', 2)}")
# Find root of polynomial
print(F"The root of x**2 - 5*x + 2 = 0 is {newton_raphson('x**2 - 5*x + 2', 0.4)}")
    # Find root of log(x) - 1 = 0 (the root is e, not the square root of 5)
print(F"The root of log(x) - 1 = 0 is {newton_raphson('log(x) - 1', 2)}")
# Exponential Roots
print(F"The root of exp(x) - 1 = 0 is {newton_raphson('exp(x) - 1', 0)}")
| 262 |
import random
import unittest
import torch
from diffusers import IFInpaintingSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
@skip_mps
class IFInpaintingSuperResolutionPipelineFastTests(PipelineTesterMixin, IFPipelineTesterMixin, unittest.TestCase):
    pipeline_class = IFInpaintingSuperResolutionPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"width", "height"}
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS.union({"original_image"})
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}

    def get_dummy_components(self):
        return self._get_superresolution_dummy_components()

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        image = floats_tensor((1, 3, 16, 16), rng=random.Random(seed)).to(device)
        original_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        mask_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)

        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "original_image": original_image,
            "mask_image": mask_image,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }

        return inputs
    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)

    def test_save_load_optional_components(self):
        self._test_save_load_optional_components()

    @unittest.skipIf(torch_device != "cuda", reason="float16 requires CUDA")
    def test_save_load_float16(self):
        # Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
        super().test_save_load_float16(expected_max_diff=1e-1)

    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(expected_max_diff=1e-2)

    def test_save_load_local(self):
        self._test_save_load_local()

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(
            expected_max_diff=1e-2,
        )
| 262 | 1 |
from math import ceil
def solution(n: int = 1001) -> int:
    """Returns the sum of the numbers on the diagonals in an n by n spiral."""
    total = 1

    for i in range(1, int(ceil(n / 2.0))):
        odd = 2 * i + 1
        even = 2 * i
        total = total + 4 * odd**2 - 6 * even

    return total
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
print(solution())
else:
try:
            n = int(sys.argv[1])
print(solution(n))
except ValueError:
print("Invalid entry - please enter a number")
| 36 |
from PIL import Image
def mean_threshold(image: Image) -> Image:
    """
    image: is a grayscale PIL image object
    """
    height, width = image.size
    mean = 0
    pixels = image.load()
    for i in range(width):
        for j in range(height):
            pixel = pixels[j, i]
            mean += pixel
    mean //= width * height

    for j in range(height):
        for i in range(width):
            pixels[i, j] = 255 if pixels[i, j] > mean else 0
    return image
if __name__ == "__main__":
_snake_case = mean_threshold(Image.open("path_to_image").convert("L"))
image.save("output_image_path")
| 36 | 1 |
import comet # From: unbabel-comet
import torch
import datasets
logger = datasets.logging.get_logger(__name__)
_CITATION = "\\n@inproceedings{rei-EtAl:2020:WMT,\n author = {Rei, Ricardo and Stewart, Craig and Farinha, Ana C and Lavie, Alon},\n title = {Unbabel's Participation in the WMT20 Metrics Shared Task},\n booktitle = {Proceedings of the Fifth Conference on Machine Translation},\n month = {November},\n year = {2020},\n address = {Online},\n publisher = {Association for Computational Linguistics},\n pages = {909--918},\n}\n@inproceedings{rei-etal-2020-comet,\n title = \"{COMET}: A Neural Framework for {MT} Evaluation\",\n author = \"Rei, Ricardo and\n Stewart, Craig and\n Farinha, Ana C and\n Lavie, Alon\",\n booktitle = \"Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP)\",\n month = nov,\n year = \"2020\",\n address = \"Online\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://www.aclweb.org/anthology/2020.emnlp-main.213\",\n pages = \"2685--2702\",\n}\n"
_DESCRIPTION = "\\nCrosslingual Optimized Metric for Evaluation of Translation (COMET) is an open-source framework used to train Machine Translation metrics that achieve high levels of correlation with different types of human judgments (HTER, DA's or MQM).\nWith the release of the framework the authors also released fully trained models that were used to compete in the WMT20 Metrics Shared Task achieving SOTA in that years competition.\n\nSee the [README.md] file at https://unbabel.github.io/COMET/html/models.html for more information.\n"
_KWARGS_DESCRIPTION = "\nCOMET score.\n\nArgs:\n\n`sources` (list of str): Source sentences\n`predictions` (list of str): candidate translations\n`references` (list of str): reference translations\n`cuda` (bool): If set to True, runs COMET using GPU\n`show_progress` (bool): Shows progress\n`model`: COMET model to be used. Will default to `wmt-large-da-estimator-1719` if None.\n\nReturns:\n `samples`: List of dictionaries with `src`, `mt`, `ref` and `score`.\n `scores`: List of scores.\n\nExamples:\n\n >>> comet_metric = datasets.load_metric('comet')\n >>> # comet_metric = load_metric('comet', 'wmt20-comet-da') # you can also choose which model to use\n >>> source = [\"Dem Feuer konnte Einhalt geboten werden\", \"Schulen und Kindergärten wurden eröffnet.\"]\n >>> hypothesis = [\"The fire could be stopped\", \"Schools and kindergartens were open\"]\n >>> reference = [\"They were able to control the fire.\", \"Schools and kindergartens opened\"]\n >>> results = comet_metric.compute(predictions=hypothesis, references=reference, sources=source)\n >>> print([round(v, 2) for v in results[\"scores\"]])\n [0.19, 0.92]\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class COMET(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            homepage="https://unbabel.github.io/COMET/html/index.html",
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "sources": datasets.Value("string", id="sequence"),
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            codebase_urls=["https://github.com/Unbabel/COMET"],
            reference_urls=[
                "https://github.com/Unbabel/COMET",
                "https://www.aclweb.org/anthology/2020.emnlp-main.213/",
                "http://www.statmt.org/wmt20/pdf/2020.wmt-1.101.pdf6",
            ],
        )

    def _download_and_prepare(self, dl_manager):
        if self.config_name == "default":
            self.scorer = comet.load_from_checkpoint(comet.download_model("wmt20-comet-da"))
        else:
            self.scorer = comet.load_from_checkpoint(comet.download_model(self.config_name))

    def _compute(self, sources, predictions, references, gpus=None, progress_bar=False):
        if gpus is None:
            gpus = 1 if torch.cuda.is_available() else 0
        data = {"src": sources, "mt": predictions, "ref": references}
        data = [dict(zip(data, t)) for t in zip(*data.values())]
        scores, mean_score = self.scorer.predict(data, gpus=gpus, progress_bar=progress_bar)
        return {"mean_score": mean_score, "scores": scores}
| 369 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_lowerCamelCase : str = {"configuration_xlnet": ["XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "XLNetConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCamelCase : List[Any] = ["XLNetTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCamelCase : Optional[Any] = ["XLNetTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_xlnet"] = [
"XLNET_PRETRAINED_MODEL_ARCHIVE_LIST",
"XLNetForMultipleChoice",
"XLNetForQuestionAnswering",
"XLNetForQuestionAnsweringSimple",
"XLNetForSequenceClassification",
"XLNetForTokenClassification",
"XLNetLMHeadModel",
"XLNetModel",
"XLNetPreTrainedModel",
"load_tf_weights_in_xlnet",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_xlnet"] = [
"TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFXLNetForMultipleChoice",
"TFXLNetForQuestionAnsweringSimple",
"TFXLNetForSequenceClassification",
"TFXLNetForTokenClassification",
"TFXLNetLMHeadModel",
"TFXLNetMainLayer",
"TFXLNetModel",
"TFXLNetPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_xlnet import XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP, XLNetConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlnet import XLNetTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlnet_fast import XLNetTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlnet import (
XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
XLNetForMultipleChoice,
XLNetForQuestionAnswering,
XLNetForQuestionAnsweringSimple,
XLNetForSequenceClassification,
XLNetForTokenClassification,
XLNetLMHeadModel,
XLNetModel,
XLNetPreTrainedModel,
load_tf_weights_in_xlnet,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlnet import (
TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLNetForMultipleChoice,
TFXLNetForQuestionAnsweringSimple,
TFXLNetForSequenceClassification,
TFXLNetForTokenClassification,
TFXLNetLMHeadModel,
TFXLNetMainLayer,
TFXLNetModel,
TFXLNetPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 99 | 0 |
"""simple docstring"""
import unittest
from transformers import AutoTokenizer, NystromformerConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
NystromformerForMaskedLM,
NystromformerForMultipleChoice,
NystromformerForQuestionAnswering,
NystromformerForSequenceClassification,
NystromformerForTokenClassification,
NystromformerModel,
)
from transformers.models.nystromformer.modeling_nystromformer import NYSTROMFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
class NystromformerModelTester:
    def __init__(
        self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True,
        use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32,
        num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu",
        hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512,
        type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3,
        num_choices=4, scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
def SCREAMING_SNAKE_CASE ( self : Tuple ):
"""simple docstring"""
_lowerCamelCase : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_lowerCamelCase : Any = None
if self.use_input_mask:
_lowerCamelCase : Optional[Any] = random_attention_mask([self.batch_size, self.seq_length] )
_lowerCamelCase : Tuple = None
if self.use_token_type_ids:
_lowerCamelCase : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
_lowerCamelCase : Optional[int] = None
_lowerCamelCase : Optional[Any] = None
_lowerCamelCase : Union[str, Any] = None
if self.use_labels:
_lowerCamelCase : List[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_lowerCamelCase : str = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_lowerCamelCase : Optional[int] = ids_tensor([self.batch_size] , self.num_choices )
_lowerCamelCase : List[str] = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def SCREAMING_SNAKE_CASE ( self : int ):
"""simple docstring"""
return NystromformerConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=_A , initializer_range=self.initializer_range , )
def SCREAMING_SNAKE_CASE ( self : List[str] , __lowerCAmelCase : Dict , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : str , __lowerCAmelCase : str , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Union[str, Any] ):
"""simple docstring"""
_lowerCamelCase : List[Any] = NystromformerModel(config=_A )
model.to(_A )
model.eval()
_lowerCamelCase : List[str] = model(_A , attention_mask=_A , token_type_ids=_A )
_lowerCamelCase : int = model(_A , token_type_ids=_A )
_lowerCamelCase : Union[str, Any] = model(_A )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def SCREAMING_SNAKE_CASE ( self : Dict , __lowerCAmelCase : List[str] , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Any , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Tuple , __lowerCAmelCase : List[Any] ):
"""simple docstring"""
_lowerCamelCase : Tuple = NystromformerForMaskedLM(config=_A )
model.to(_A )
model.eval()
_lowerCamelCase : Tuple = model(_A , attention_mask=_A , token_type_ids=_A , labels=_A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : str , __lowerCAmelCase : Dict , __lowerCAmelCase : Any , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Any , __lowerCAmelCase : List[Any] ):
"""simple docstring"""
_lowerCamelCase : Any = NystromformerForQuestionAnswering(config=_A )
model.to(_A )
model.eval()
_lowerCamelCase : int = model(
_A , attention_mask=_A , token_type_ids=_A , start_positions=_A , end_positions=_A , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def SCREAMING_SNAKE_CASE ( self : Optional[int] , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Dict , __lowerCAmelCase : Any , __lowerCAmelCase : Tuple , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : Any ):
"""simple docstring"""
_lowerCamelCase : List[Any] = self.num_labels
_lowerCamelCase : Tuple = NystromformerForSequenceClassification(_A )
model.to(_A )
model.eval()
_lowerCamelCase : Any = model(_A , attention_mask=_A , token_type_ids=_A , labels=_A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def SCREAMING_SNAKE_CASE ( self : Any , __lowerCAmelCase : str , __lowerCAmelCase : str , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : str , __lowerCAmelCase : List[str] , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : str ):
"""simple docstring"""
_lowerCamelCase : Dict = self.num_labels
_lowerCamelCase : Dict = NystromformerForTokenClassification(config=_A )
model.to(_A )
model.eval()
_lowerCamelCase : Dict = model(_A , attention_mask=_A , token_type_ids=_A , labels=_A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def SCREAMING_SNAKE_CASE ( self : Tuple , __lowerCAmelCase : Dict , __lowerCAmelCase : Dict , __lowerCAmelCase : str , __lowerCAmelCase : Any , __lowerCAmelCase : Tuple , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Union[str, Any] ):
"""simple docstring"""
_lowerCamelCase : Union[str, Any] = self.num_choices
_lowerCamelCase : Dict = NystromformerForMultipleChoice(config=_A )
model.to(_A )
model.eval()
_lowerCamelCase : Union[str, Any] = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_lowerCamelCase : Dict = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_lowerCamelCase : Union[str, Any] = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_lowerCamelCase : Optional[Any] = model(
_A , attention_mask=_A , token_type_ids=_A , labels=_A , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def SCREAMING_SNAKE_CASE ( self : Optional[int] ):
"""simple docstring"""
_lowerCamelCase : Any = self.prepare_config_and_inputs()
(
(
_lowerCamelCase
) , (
_lowerCamelCase
) , (
_lowerCamelCase
) , (
_lowerCamelCase
) , (
_lowerCamelCase
) , (
_lowerCamelCase
) , (
_lowerCamelCase
) ,
) : Optional[int] = config_and_inputs
_lowerCamelCase : Dict = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_torch
class NystromformerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            NystromformerModel,
            NystromformerForMaskedLM,
            NystromformerForMultipleChoice,
            NystromformerForQuestionAnswering,
            NystromformerForSequenceClassification,
            NystromformerForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": NystromformerModel,
            "fill-mask": NystromformerForMaskedLM,
            "question-answering": NystromformerForQuestionAnswering,
            "text-classification": NystromformerForSequenceClassification,
            "token-classification": NystromformerForTokenClassification,
            "zero-shot": NystromformerForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_headmasking = False

    def setUp(self):
        self.model_tester = NystromformerModelTester(self)
        self.config_tester = ConfigTester(self, config_class=NystromformerConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in NYSTROMFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = NystromformerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


@require_torch
class NystromformerModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head(self):
        model = NystromformerModel.from_pretrained("uw-madison/nystromformer-512")
        input_ids = torch.tensor([[0, 1, 2, 3, 4, 5]])

        with torch.no_grad():
            output = model(input_ids)[0]

        expected_shape = torch.Size((1, 6, 768))
        self.assertEqual(output.shape, expected_shape)

        expected_slice = torch.tensor(
            [[[-0.4532, -0.0936, 0.5137], [-0.2676, 0.0628, 0.6186], [-0.3629, -0.1726, 0.4716]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_masked_lm_end_to_end(self):
        sentence = "the [MASK] of Belgium is Brussels"
        tokenizer = AutoTokenizer.from_pretrained("uw-madison/nystromformer-512")
        model = NystromformerForMaskedLM.from_pretrained("uw-madison/nystromformer-512")

        encoding = tokenizer(sentence, return_tensors="pt")

        with torch.no_grad():
            token_logits = model(encoding.input_ids).logits

        prediction = token_logits[:, 2, :].argmax(-1)[0]
        self.assertEqual(tokenizer.decode(prediction), "capital")
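# Background note (editor): Nystromformer replaces full softmax self-attention with a
# Nystrom (landmark-based) low-rank approximation, cutting the O(n^2) attention cost to
# roughly O(n) in sequence length -- hence the 512-token checkpoint exercised above.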
from collections import UserDict
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class ZeroShotImageClassificationPipeline(Pipeline):
    """
    Zero-shot image classification pipeline using CLIP-like checkpoints: it predicts the class
    of an image from a set of `candidate_labels` you provide at call time.
    """

    def __init__(self, **kwargs):
        super().__init__(**kwargs)

        requires_backends(self, "vision")
        self.check_model_type(
            TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
            if self.framework == "tf"
            else MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
        )

    def __call__(self, images: Union[str, List[str], "Image", List["Image"]], **kwargs):
        return super().__call__(images, **kwargs)

    def _sanitize_parameters(self, **kwargs):
        preprocess_params = {}
        if "candidate_labels" in kwargs:
            preprocess_params["candidate_labels"] = kwargs["candidate_labels"]
        if "hypothesis_template" in kwargs:
            preprocess_params["hypothesis_template"] = kwargs["hypothesis_template"]

        return preprocess_params, {}, {}

    def preprocess(self, image, candidate_labels=None, hypothesis_template="This is a photo of {}."):
        image = load_image(image)
        inputs = self.image_processor(images=[image], return_tensors=self.framework)
        inputs["candidate_labels"] = candidate_labels
        sequences = [hypothesis_template.format(x) for x in candidate_labels]
        text_inputs = self.tokenizer(sequences, return_tensors=self.framework, padding=True)
        inputs["text_inputs"] = [text_inputs]
        return inputs

    def _forward(self, model_inputs):
        candidate_labels = model_inputs.pop("candidate_labels")
        text_inputs = model_inputs.pop("text_inputs")
        if isinstance(text_inputs[0], UserDict):
            text_inputs = text_inputs[0]
        else:
            # Batching case.
            text_inputs = text_inputs[0][0]

        outputs = self.model(**text_inputs, **model_inputs)

        model_outputs = {
            "candidate_labels": candidate_labels,
            "logits": outputs.logits_per_image,
        }
        return model_outputs

    def postprocess(self, model_outputs):
        candidate_labels = model_outputs.pop("candidate_labels")
        logits = model_outputs["logits"][0]
        if self.framework == "pt":
            probs = logits.softmax(dim=-1).squeeze(-1)
            scores = probs.tolist()
            if not isinstance(scores, list):
                scores = [scores]
        elif self.framework == "tf":
            probs = stable_softmax(logits, axis=-1)
            scores = probs.numpy().tolist()
        else:
            raise ValueError(f"Unsupported framework: {self.framework}")

        result = [
            {"score": score, "label": candidate_label}
            for score, candidate_label in sorted(zip(scores, candidate_labels), key=lambda x: -x[0])
        ]
        return result
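# Usage sketch (editor's addition; the checkpoint name and image path are illustrative,
# not pinned by this module):
#
#   from transformers import pipeline
#
#   classifier = pipeline("zero-shot-image-classification", model="openai/clip-vit-base-patch32")
#   preds = classifier("cat.png", candidate_labels=["cat", "dog", "car"])
#   # -> [{"score": ..., "label": "cat"}, ...], sorted by descending score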
"""simple docstring"""
from typing import Any, Callable, Dict, List, Optional, Union
import torch
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionPipeline,
    UNet2DConditionModel,
)
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
a ="""CompVis/stable-diffusion-v1-1"""
a ="""CompVis/stable-diffusion-v1-2"""
a ="""CompVis/stable-diffusion-v1-3"""
a ="""CompVis/stable-diffusion-v1-4"""
class StableDiffusionComparisonPipeline(DiffusionPipeline):
    """
    Pipeline that runs the same prompt through the first four Stable Diffusion v1.x
    checkpoints and returns one image per checkpoint for side-by-side comparison.
    """

    def __init__(self, vae: AutoencoderKL, text_encoder: CLIPTextModel, tokenizer: CLIPTokenizer, unet: UNet2DConditionModel, scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler], safety_checker: StableDiffusionSafetyChecker, feature_extractor: CLIPImageProcessor, requires_safety_checker: bool = True):
        super().__init__()

        self.pipe1 = StableDiffusionPipeline.from_pretrained(pipe1_model_id)
        self.pipe2 = StableDiffusionPipeline.from_pretrained(pipe2_model_id)
        self.pipe3 = StableDiffusionPipeline.from_pretrained(pipe3_model_id)
        self.pipe4 = StableDiffusionPipeline(
            vae=vae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            unet=unet,
            scheduler=scheduler,
            safety_checker=safety_checker,
            feature_extractor=feature_extractor,
            requires_safety_checker=requires_safety_checker,
        )

        self.register_modules(pipeline1=self.pipe1, pipeline2=self.pipe2, pipeline3=self.pipe3, pipeline4=self.pipe4)

    @property
    def layers(self) -> Dict[str, Any]:
        return {k: getattr(self, k) for k in self.config.keys() if not k.startswith("_")}

    def enable_attention_slicing(self, slice_size: Optional[Union[str, int]] = "auto"):
        if slice_size == "auto":
            # half the attention head size is usually a good trade-off between
            # speed and memory
            slice_size = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(slice_size)

    def disable_attention_slicing(self):
        self.enable_attention_slicing(None)

    @torch.no_grad()
    def text2img_sd1_1(self, prompt: Union[str, List[str]], height: int = 512, width: int = 512, num_inference_steps: int = 50, guidance_scale: float = 7.5, negative_prompt: Optional[Union[str, List[str]]] = None, num_images_per_prompt: Optional[int] = 1, eta: float = 0.0, generator: Optional[torch.Generator] = None, latents: Optional[torch.FloatTensor] = None, output_type: Optional[str] = "pil", return_dict: bool = True, callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None, callback_steps: int = 1, **kwargs):
        return self.pipe1(prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs)

    @torch.no_grad()
    def text2img_sd1_2(self, prompt: Union[str, List[str]], height: int = 512, width: int = 512, num_inference_steps: int = 50, guidance_scale: float = 7.5, negative_prompt: Optional[Union[str, List[str]]] = None, num_images_per_prompt: Optional[int] = 1, eta: float = 0.0, generator: Optional[torch.Generator] = None, latents: Optional[torch.FloatTensor] = None, output_type: Optional[str] = "pil", return_dict: bool = True, callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None, callback_steps: int = 1, **kwargs):
        return self.pipe2(prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs)

    @torch.no_grad()
    def text2img_sd1_3(self, prompt: Union[str, List[str]], height: int = 512, width: int = 512, num_inference_steps: int = 50, guidance_scale: float = 7.5, negative_prompt: Optional[Union[str, List[str]]] = None, num_images_per_prompt: Optional[int] = 1, eta: float = 0.0, generator: Optional[torch.Generator] = None, latents: Optional[torch.FloatTensor] = None, output_type: Optional[str] = "pil", return_dict: bool = True, callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None, callback_steps: int = 1, **kwargs):
        return self.pipe3(prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs)

    @torch.no_grad()
    def text2img_sd1_4(self, prompt: Union[str, List[str]], height: int = 512, width: int = 512, num_inference_steps: int = 50, guidance_scale: float = 7.5, negative_prompt: Optional[Union[str, List[str]]] = None, num_images_per_prompt: Optional[int] = 1, eta: float = 0.0, generator: Optional[torch.Generator] = None, latents: Optional[torch.FloatTensor] = None, output_type: Optional[str] = "pil", return_dict: bool = True, callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None, callback_steps: int = 1, **kwargs):
        return self.pipe4(prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs)

    @torch.no_grad()
    def __call__(self, prompt: Union[str, List[str]], height: int = 512, width: int = 512, num_inference_steps: int = 50, guidance_scale: float = 7.5, negative_prompt: Optional[Union[str, List[str]]] = None, num_images_per_prompt: Optional[int] = 1, eta: float = 0.0, generator: Optional[torch.Generator] = None, latents: Optional[torch.FloatTensor] = None, output_type: Optional[str] = "pil", return_dict: bool = True, callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None, callback_steps: int = 1, **kwargs):
        device = "cuda" if torch.cuda.is_available() else "cpu"
        self.to(device)

        # Checks if the height and width are divisible by 8 or not
        if height % 8 != 0 or width % 8 != 0:
            raise ValueError(f"`height` and `width` must be divisible by 8 but are {height} and {width}.")

        # Get first result from Stable Diffusion Checkpoint v1.1
        res1 = self.text2img_sd1_1(prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs)

        # Get first result from Stable Diffusion Checkpoint v1.2
        res2 = self.text2img_sd1_2(prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs)

        # Get first result from Stable Diffusion Checkpoint v1.3
        res3 = self.text2img_sd1_3(prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs)

        # Get first result from Stable Diffusion Checkpoint v1.4
        res4 = self.text2img_sd1_4(prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs)

        # Get all result images into a single list and pass it via StableDiffusionPipelineOutput for final result
        return StableDiffusionPipelineOutput([res1[0], res2[0], res3[0], res4[0]])
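# Usage sketch (editor's addition; assumes enough memory to hold four v1.x checkpoints at
# once, and that this file is registered as the "stable_diffusion_comparison" community
# pipeline -- the registration name is an assumption):
#
#   pipe = DiffusionPipeline.from_pretrained(
#       "CompVis/stable-diffusion-v1-4", custom_pipeline="stable_diffusion_comparison"
#   )
#   images = pipe("an astronaut riding a horse").images  # one image per checkpoint, v1.1-v1.4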
import argparse
import intel_extension_for_pytorch as ipex
import torch
from diffusers import DPMSolverMultistepScheduler, StableDiffusionPipeline
parser = argparse.ArgumentParser("Stable Diffusion script with intel optimization", add_help=False)
parser.add_argument("--dpm", action="store_true", help="Enable DPMSolver or not")
parser.add_argument("--steps", default=None, type=int, help="Num inference steps")
args = parser.parse_args()

device = "cpu"
prompt = "a lovely <dicoo> in red dress and hat, in the snowly and brightly night, with many brighly buildings"
model_id = "path-to-your-trained-model"

pipe = StableDiffusionPipeline.from_pretrained(model_id)
if args.dpm:
    pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
pipe = pipe.to(device)

# to channels last: oneDNN kernels prefer NHWC layouts, so converting the conv-heavy
# submodules up front avoids repeated layout conversions during inference
pipe.unet = pipe.unet.to(memory_format=torch.channels_last)
pipe.vae = pipe.vae.to(memory_format=torch.channels_last)
pipe.text_encoder = pipe.text_encoder.to(memory_format=torch.channels_last)
if pipe.requires_safety_checker:
    pipe.safety_checker = pipe.safety_checker.to(memory_format=torch.channels_last)

# optimize with ipex (the sample input lets ipex specialize the UNet; fall back if the
# installed version does not support `sample_input`)
sample = torch.randn(2, 4, 64, 64)
timestep = torch.rand(1) * 999
encoder_hidden_status = torch.randn(2, 77, 768)
input_example = (sample, timestep, encoder_hidden_status)
try:
    pipe.unet = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloat16, inplace=True, sample_input=input_example)
except Exception:
    pipe.unet = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloat16, inplace=True)
pipe.vae = ipex.optimize(pipe.vae.eval(), dtype=torch.bfloat16, inplace=True)
pipe.text_encoder = ipex.optimize(pipe.text_encoder.eval(), dtype=torch.bfloat16, inplace=True)
if pipe.requires_safety_checker:
    pipe.safety_checker = ipex.optimize(pipe.safety_checker.eval(), dtype=torch.bfloat16, inplace=True)

# compute
seed = 666
generator = torch.Generator(device).manual_seed(seed)
generate_kwargs = {"generator": generator}
if args.steps is not None:
    generate_kwargs["num_inference_steps"] = args.steps

with torch.cpu.amp.autocast(enabled=True, dtype=torch.bfloat16):
    image = pipe(prompt, **generate_kwargs).images[0]

# save image
image.save("generated.png")
from typing import Callable, Optional
from .. import Features
from ..packaged_modules.generator.generator import Generator
from .abc import AbstractDatasetInputStream
class GeneratorDatasetInputStream(AbstractDatasetInputStream):
    def __init__(self, generator: Callable, features: Optional[Features] = None, cache_dir: str = None, keep_in_memory: bool = False, streaming: bool = False, gen_kwargs: Optional[dict] = None, num_proc: Optional[int] = None, **kwargs):
        super().__init__(features=features, cache_dir=cache_dir, keep_in_memory=keep_in_memory, streaming=streaming, num_proc=num_proc, **kwargs)
        self.builder = Generator(cache_dir=cache_dir, features=features, generator=generator, gen_kwargs=gen_kwargs, **kwargs)

    def read(self):
        # Build iterable dataset
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split="train")
        # Build regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None

            self.builder.download_and_prepare(download_config=download_config, download_mode=download_mode, verification_mode=verification_mode, base_path=base_path, num_proc=self.num_proc)
            dataset = self.builder.as_dataset(split="train", verification_mode=verification_mode, in_memory=self.keep_in_memory)
        return dataset
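# Usage sketch (editor's addition; values are illustrative). This class is the machinery
# behind `Dataset.from_generator`:
#
#   from datasets import Dataset
#
#   def gen():
#       for i in range(3):
#           yield {"id": i, "text": f"example {i}"}
#
#   ds = Dataset.from_generator(gen)  # builds a Generator builder and reads it as above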
import os
import re
import shutil
from argparse import ArgumentParser, Namespace
from datasets.commands import BaseDatasetsCLICommand
from datasets.utils.logging import get_logger
HIGHLIGHT_MESSAGE_PRE = "<<<<<<< This should probably be modified because it mentions: "
HIGHLIGHT_MESSAGE_POST = "=======\n>>>>>>>\n"

TO_HIGHLIGHT = [
'TextEncoderConfig',
'ByteTextEncoder',
'SubwordTextEncoder',
'encoder_config',
'maybe_build_from_corpus',
'manual_dir',
]
TO_CONVERT = [
# (pattern, replacement)
# Order is important here for some replacements
(R'tfds\.core', R'datasets'),
(R'tf\.io\.gfile\.GFile', R'open'),
(R'tf\.([\w\d]+)', R'datasets.Value(\'\1\')'),
(R'tfds\.features\.Text\(\)', R'datasets.Value(\'string\')'),
(R'tfds\.features\.Text\(', R'datasets.Value(\'string\'),'),
(R'features\s*=\s*tfds.features.FeaturesDict\(', R'features=datasets.Features('),
(R'tfds\.features\.FeaturesDict\(', R'dict('),
(R'The TensorFlow Datasets Authors', R'The TensorFlow Datasets Authors and the HuggingFace Datasets Authors'),
(R'tfds\.', R'datasets.'),
(R'dl_manager\.manual_dir', R'self.config.data_dir'),
(R'self\.builder_config', R'self.config'),
]
def convert_command_factory(args: Namespace):
    """Factory function used to build a ConvertCommand from parsed command-line arguments."""
    return ConvertCommand(args.tfds_path, args.datasets_directory)


class ConvertCommand(BaseDatasetsCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        train_parser = parser.add_parser(
            "convert",
            help="Convert a TensorFlow Datasets dataset to a HuggingFace Datasets dataset.",
        )
        train_parser.add_argument(
            "--tfds_path",
            type=str,
            required=True,
            help="Path to a TensorFlow Datasets folder to convert or a single tfds file to convert.",
        )
        train_parser.add_argument(
            "--datasets_directory", type=str, required=True, help="Path to the HuggingFace Datasets folder."
        )
        train_parser.set_defaults(func=convert_command_factory)
    def __init__(self, tfds_path: str, datasets_directory: str, *args):
        self._logger = get_logger("datasets-cli/converting")

        self._tfds_path = tfds_path
        self._datasets_directory = datasets_directory

    def run(self):
        if os.path.isdir(self._tfds_path):
            abs_tfds_path = os.path.abspath(self._tfds_path)
        elif os.path.isfile(self._tfds_path):
            abs_tfds_path = os.path.dirname(self._tfds_path)
        else:
            raise ValueError("--tfds_path is neither a directory nor a file. Please check path.")

        abs_datasets_path = os.path.abspath(self._datasets_directory)

        self._logger.info(f"Converting datasets from {abs_tfds_path} to {abs_datasets_path}")

        utils_files = []
        with_manual_update = []
        imports_to_builder_map = {}
        if os.path.isdir(self._tfds_path):
            file_names = os.listdir(abs_tfds_path)
        else:
            file_names = [os.path.basename(self._tfds_path)]

        for f_name in file_names:
            self._logger.info(f"Looking at file {f_name}")
            input_file = os.path.join(abs_tfds_path, f_name)
            output_file = os.path.join(abs_datasets_path, f_name)

            if not os.path.isfile(input_file) or "__init__" in f_name or "_test" in f_name or ".py" not in f_name:
                self._logger.info("Skipping file")
                continue

            with open(input_file, encoding="utf-8") as f:
                lines = f.readlines()

            out_lines = []
            is_builder = False
            needs_manual_update = False
            tfds_imports = []
            for line in lines:
                out_line = line

                # Convert imports
                if "import tensorflow.compat.v2 as tf" in out_line:
                    continue
                elif "@tfds.core" in out_line:
                    continue
                elif "builder=self" in out_line:
                    continue
                elif "import tensorflow_datasets.public_api as tfds" in out_line:
                    out_line = "import datasets\n"
                elif "import tensorflow" in out_line:
                    # order is important here
                    out_line = ""
                    continue
                elif "from absl import logging" in out_line:
                    out_line = "from datasets import logging\n"
                elif "getLogger" in out_line:
                    out_line = out_line.replace("getLogger", "get_logger")
                elif any(expression in out_line for expression in TO_HIGHLIGHT):
                    needs_manual_update = True
                    to_remove = list(filter(lambda e: e in out_line, TO_HIGHLIGHT))
                    out_lines.append(HIGHLIGHT_MESSAGE_PRE + str(to_remove) + "\n")
                    out_lines.append(out_line)
                    out_lines.append(HIGHLIGHT_MESSAGE_POST)
                    continue
                else:
                    for pattern, replacement in TO_CONVERT:
                        out_line = re.sub(pattern, replacement, out_line)

                # Take care of saving utilities (to later move them together with main script)
                if "tensorflow_datasets" in out_line:
                    match = re.match(r"from\stensorflow_datasets.*import\s([^\.\r\n]+)", out_line)
                    tfds_imports.extend(imp.strip() for imp in match.group(1).split(","))
                    out_line = "from . import " + match.group(1)

                # Check we have not forget anything
                if "tf." in out_line or "tfds." in out_line or "tensorflow_datasets" in out_line:
                    raise ValueError(f"Error converting {out_line.strip()}")

                if "GeneratorBasedBuilder" in out_line or "BeamBasedBuilder" in out_line:
                    is_builder = True
                out_lines.append(out_line)

            if is_builder or "wmt" in f_name:
                # We create a new directory for each dataset
                dir_name = f_name.replace(".py", "")
                output_dir = os.path.join(abs_datasets_path, dir_name)
                output_file = os.path.join(output_dir, f_name)
                os.makedirs(output_dir, exist_ok=True)
                self._logger.info(f"Adding directory {output_dir}")
                imports_to_builder_map.update({imp: output_dir for imp in tfds_imports})
            else:
                # Utilities will be moved at the end
                utils_files.append(output_file)

            if needs_manual_update:
                with_manual_update.append(output_file)

            with open(output_file, "w", encoding="utf-8") as f:
                f.writelines(out_lines)
            self._logger.info(f"Converted in {output_file}")

        for utils_file in utils_files:
            try:
                f_name = os.path.basename(utils_file)
                dest_folder = imports_to_builder_map[f_name.replace(".py", "")]
                self._logger.info(f"Moving {dest_folder} to {utils_file}")
                shutil.copy(utils_file, dest_folder)
            except KeyError:
                self._logger.error(f"Cannot find destination folder for {utils_file}. Please copy manually.")

        if with_manual_update:
            for file_path in with_manual_update:
                self._logger.warning(
                    f"You need to manually update file {file_path} to remove configurations using 'TextEncoderConfig'."
                )
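# Example invocation (editor's addition; paths are placeholders):
#
#   datasets-cli convert --tfds_path ./tfds_datasets/my_dataset.py --datasets_directory ./hf_datasets/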
"""simple docstring"""
import unittest
from transformers import MPNetConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MPNetForMaskedLM,
MPNetForMultipleChoice,
MPNetForQuestionAnswering,
MPNetForSequenceClassification,
MPNetForTokenClassification,
MPNetModel,
)
class MPNetModelTester:
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=False, use_labels=True, vocab_size=99, hidden_size=64, num_hidden_layers=5, num_attention_heads=4, intermediate_size=64, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def get_large_model_config(self):
        return MPNetConfig.from_pretrained("microsoft/mpnet-base")

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()
        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return MPNetConfig(vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, initializer_range=self.initializer_range)

    def create_and_check_mpnet_model(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = MPNetModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_mpnet_for_question_answering(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = MPNetForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, start_positions=sequence_labels, end_positions=sequence_labels)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_mpnet_for_sequence_classification(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = MPNetForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_mpnet_for_multiple_choice(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_choices = self.num_choices
        model = MPNetForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        # Tile each (batch, seq) tensor to (batch, num_choices, seq) so every choice is
        # scored by the same shared encoder weights.
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(multiple_choice_inputs_ids, attention_mask=multiple_choice_input_mask, labels=choice_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def create_and_check_mpnet_for_token_classification(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = MPNetForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, input_mask, sequence_labels, token_labels, choice_labels) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class MPNetModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            MPNetForMaskedLM,
            MPNetForMultipleChoice,
            MPNetForQuestionAnswering,
            MPNetForSequenceClassification,
            MPNetForTokenClassification,
            MPNetModel,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": MPNetModel,
            "fill-mask": MPNetForMaskedLM,
            "question-answering": MPNetForQuestionAnswering,
            "text-classification": MPNetForSequenceClassification,
            "token-classification": MPNetForTokenClassification,
            "zero-shot": MPNetForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = True

    def setUp(self):
        self.model_tester = MPNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MPNetConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_mpnet_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mpnet_model(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mpnet_for_sequence_classification(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mpnet_for_multiple_choice(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mpnet_for_token_classification(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mpnet_for_question_answering(*config_and_inputs)


@require_torch
class MPNetModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head(self):
        model = MPNetModel.from_pretrained("microsoft/mpnet-base")
        input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        output = model(input_ids)[0]
        expected_shape = torch.Size((1, 11, 768))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[-0.0550, 0.1943, -0.0740], [-0.0562, 0.2211, -0.0579], [-0.0437, 0.3337, -0.0641]]]
        )
        # compare the actual values for a slice.
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
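# Background note (editor): MPNet pre-training unifies masked language modeling
# (BERT-style) and permuted language modeling (XLNet-style) in one objective, which is
# why a single encoder checkpoint backs every task head exercised by the tests above.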
"""simple docstring"""
import importlib.metadata
import warnings
from copy import deepcopy
from packaging import version
from ..utils import logging
from .import_utils import is_accelerate_available, is_bitsandbytes_available
if is_bitsandbytes_available():
import bitsandbytes as bnb
import torch
import torch.nn as nn
    from ..pytorch_utils import Conv1D
if is_accelerate_available():
from accelerate import init_empty_weights
from accelerate.utils import find_tied_parameters
logger = logging.get_logger(__name__)
def set_module_quantized_tensor_to_device(module, tensor_name, device, value=None, fp16_statistics=None):
    # Recurse if needed
    if "." in tensor_name:
        splits = tensor_name.split(".")
        for split in splits[:-1]:
            new_module = getattr(module, split)
            if new_module is None:
                raise ValueError(f"{module} has no attribute {split}.")
            module = new_module
        tensor_name = splits[-1]

    if tensor_name not in module._parameters and tensor_name not in module._buffers:
        raise ValueError(f"{module} does not have a parameter or a buffer named {tensor_name}.")
    is_buffer = tensor_name in module._buffers
    old_value = getattr(module, tensor_name)

    if old_value.device == torch.device("meta") and device not in ["meta", torch.device("meta")] and value is None:
        raise ValueError(f"{tensor_name} is on the meta device, we need a `value` to put in on {device}.")

    is_4bit = False
    is_8bit = False
    if is_buffer or not is_bitsandbytes_available():
        is_8bit = False
        is_4bit = False
    else:
        is_4bit = hasattr(bnb.nn, "Params4bit") and isinstance(module._parameters[tensor_name], bnb.nn.Params4bit)
        is_8bit = isinstance(module._parameters[tensor_name], bnb.nn.Int8Params)

    if is_8bit or is_4bit:
        param = module._parameters[tensor_name]
        if param.device.type != "cuda":
            if value is None:
                new_value = old_value.to(device)
            elif isinstance(value, torch.Tensor):
                new_value = value.to("cpu")
                if value.dtype == torch.int8:
                    is_8bit_serializable = version.parse(importlib.metadata.version("bitsandbytes")) > version.parse(
                        "0.37.2"
                    )
                    if not is_8bit_serializable:
                        raise ValueError(
                            "Detected int8 weights but the version of bitsandbytes is not compatible with int8 serialization. "
                            "Make sure to download the latest `bitsandbytes` version. `pip install --upgrade bitsandbytes`."
                        )
            else:
                new_value = torch.tensor(value, device="cpu")

            # Support models using `Conv1D` in place of `nn.Linear` (e.g. gpt2) by transposing the weight matrix prior to quantization.
            # Since weights are saved in the correct "orientation", we skip transposing when loading.
            if issubclass(module.source_cls, Conv1D) and fp16_statistics is None:
                new_value = new_value.T

            kwargs = old_value.__dict__
            if is_8bit:
                new_value = bnb.nn.Int8Params(new_value, requires_grad=False, **kwargs).to(device)
            elif is_4bit:
                new_value = bnb.nn.Params4bit(new_value, requires_grad=False, **kwargs).to(device)

            module._parameters[tensor_name] = new_value
            if fp16_statistics is not None:
                setattr(module.weight, "SCB", fp16_statistics.to(device))
    else:
        if value is None:
            new_value = old_value.to(device)
        elif isinstance(value, torch.Tensor):
            new_value = value.to(device)
        else:
            new_value = torch.tensor(value, device=device)

        if is_buffer:
            module._buffers[tensor_name] = new_value
        else:
            new_value = nn.Parameter(new_value, requires_grad=old_value.requires_grad)
            module._parameters[tensor_name] = new_value
def _replace_with_bnb_linear(model, modules_to_not_convert=None, current_key_name=None, quantization_config=None, has_been_replaced=False):
    for name, module in model.named_children():
        if current_key_name is None:
            current_key_name = []
        current_key_name.append(name)

        if (isinstance(module, nn.Linear) or isinstance(module, Conv1D)) and name not in modules_to_not_convert:
            # Check if the current key is not in the `modules_to_not_convert`
            if not any(key in ".".join(current_key_name) for key in modules_to_not_convert):
                with init_empty_weights():
                    if isinstance(module, Conv1D):
                        in_features, out_features = module.weight.shape
                    else:
                        in_features = module.in_features
                        out_features = module.out_features

                    if quantization_config.quantization_method() == "llm_int8":
                        model._modules[name] = bnb.nn.Linear8bitLt(
                            in_features,
                            out_features,
                            module.bias is not None,
                            has_fp16_weights=quantization_config.llm_int8_has_fp16_weight,
                            threshold=quantization_config.llm_int8_threshold,
                        )
                        has_been_replaced = True
                    else:
                        if (
                            quantization_config.llm_int8_skip_modules is not None
                            and name in quantization_config.llm_int8_skip_modules
                        ):
                            pass
                        else:
                            model._modules[name] = bnb.nn.Linear4bit(
                                in_features,
                                out_features,
                                module.bias is not None,
                                quantization_config.bnb_4bit_compute_dtype,
                                compress_statistics=quantization_config.bnb_4bit_use_double_quant,
                                quant_type=quantization_config.bnb_4bit_quant_type,
                            )
                            has_been_replaced = True
                    # Store the module class in case we need to transpose the weight later
                    model._modules[name].source_cls = type(module)
                    # Force requires grad to False to avoid unexpected errors
                    model._modules[name].requires_grad_(False)
        if len(list(module.children())) > 0:
            _, has_been_replaced = _replace_with_bnb_linear(
                module,
                modules_to_not_convert,
                current_key_name,
                quantization_config,
                has_been_replaced=has_been_replaced,
            )
        # Remove the last key for recursion
        current_key_name.pop(-1)
    return model, has_been_replaced
def replace_with_bnb_linear(model, modules_to_not_convert=None, current_key_name=None, quantization_config=None):
    modules_to_not_convert = ["lm_head"] if modules_to_not_convert is None else modules_to_not_convert
    model, has_been_replaced = _replace_with_bnb_linear(
        model, modules_to_not_convert, current_key_name, quantization_config
    )

    if not has_been_replaced:
        logger.warning(
            "You are loading your model in 8bit or 4bit but no linear modules were found in your model."
            " Please double check your model architecture, or submit an issue on github if you think this is"
            " a bug."
        )

    return model
def replace_8bit_linear(*args, **kwargs):
    warnings.warn(
        "`replace_8bit_linear` will be deprecated in a future version, please use `replace_with_bnb_linear` instead",
        FutureWarning,
    )
    return replace_with_bnb_linear(*args, **kwargs)


def set_module_8bit_tensor_to_device(*args, **kwargs):
    warnings.warn(
        "`set_module_8bit_tensor_to_device` will be deprecated in a future version, please use `set_module_quantized_tensor_to_device` instead",
        FutureWarning,
    )
    return set_module_quantized_tensor_to_device(*args, **kwargs)
def get_keys_to_not_convert(model):
    # Create a copy of the model and tie the weights, then check whether it contains tied weights
    tied_model = deepcopy(model)  # this has 0 cost since it is done inside `init_empty_weights` context manager`
    tied_model.tie_weights()

    tied_params = find_tied_parameters(tied_model)
    # For compatibility with Accelerate < 0.18
    if isinstance(tied_params, dict):
        tied_keys = sum(list(tied_params.values()), []) + list(tied_params.keys())
    else:
        tied_keys = sum(tied_params, [])
    has_tied_params = len(tied_keys) > 0

    # Check if it is a base model
    is_base_model = not hasattr(model, model.base_model_prefix)

    # Ignore this for base models (BertModel, GPT2Model, etc.)
    if (not has_tied_params) and is_base_model:
        return []

    # otherwise they have an attached head
    list_modules = list(model.named_children())
    list_last_module = [list_modules[-1][0]]

    # add last module together with tied weights
    intersection = set(list_last_module) - set(tied_keys)
    list_untouched = list(set(tied_keys)) + list(intersection)

    # remove ".weight" from the keys
    names_to_remove = [".weight", ".bias"]
    filtered_module_names = []
    for name in list_untouched:
        for name_to_remove in names_to_remove:
            if name_to_remove in name:
                name = name.replace(name_to_remove, "")
        filtered_module_names.append(name)

    return filtered_module_names
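# Usage sketch (editor's addition; assumes a CUDA machine with `bitsandbytes` and
# `accelerate` installed -- the checkpoint name is illustrative):
#
#   from transformers import AutoModelForCausalLM
#
#   model = AutoModelForCausalLM.from_pretrained("facebook/opt-350m", load_in_8bit=True, device_map="auto")
#
# Under the hood, `replace_with_bnb_linear` swaps eligible nn.Linear layers for
# bnb.nn.Linear8bitLt, while `get_keys_to_not_convert` keeps the (possibly weight-tied)
# output head in full precision.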
import itertools
import os
from collections import Counter, defaultdict
from concurrent.futures import ThreadPoolExecutor, as_completed
import numpy as np
import datasets
from .execute import check_correctness
_UpperCAmelCase : Any ="""\
@misc{chen2021evaluating,
title={Evaluating Large Language Models Trained on Code},
author={Mark Chen and Jerry Tworek and Heewoo Jun and Qiming Yuan \
and Henrique Ponde de Oliveira Pinto and Jared Kaplan and Harri Edwards \
and Yuri Burda and Nicholas Joseph and Greg Brockman and Alex Ray \
and Raul Puri and Gretchen Krueger and Michael Petrov and Heidy Khlaaf \
and Girish Sastry and Pamela Mishkin and Brooke Chan and Scott Gray \
and Nick Ryder and Mikhail Pavlov and Alethea Power and Lukasz Kaiser \
and Mohammad Bavarian and Clemens Winter and Philippe Tillet \
and Felipe Petroski Such and Dave Cummings and Matthias Plappert \
and Fotios Chantzis and Elizabeth Barnes and Ariel Herbert-Voss \
and William Hebgen Guss and Alex Nichol and Alex Paino and Nikolas Tezak \
and Jie Tang and Igor Babuschkin and Suchir Balaji and Shantanu Jain \
and William Saunders and Christopher Hesse and Andrew N. Carr \
and Jan Leike and Josh Achiam and Vedant Misra and Evan Morikawa \
and Alec Radford and Matthew Knight and Miles Brundage and Mira Murati \
and Katie Mayer and Peter Welinder and Bob McGrew and Dario Amodei \
and Sam McCandlish and Ilya Sutskever and Wojciech Zaremba},
year={2021},
eprint={2107.03374},
archivePrefix={arXiv},
primaryClass={cs.LG}
}
"""
_UpperCAmelCase : Optional[Any] ="""\
This metric implements the evaluation harness for the HumanEval problem solving dataset
described in the paper \"Evaluating Large Language Models Trained on Code\"
(https://arxiv.org/abs/2107.03374).
"""
_UpperCAmelCase : List[Any] ="""
Calculates how good are predictions given some references, using certain scores
Args:
    predictions: list of candidates to evaluate. Each candidate should be a list
of strings with several code candidates to solve the problem.
references: a list with a test for each prediction. Each test should evaluate the
correctness of a code candidate.
k: number of code candidates to consider in the evaluation (Default: [1, 10, 100])
    num_workers: number of workers used to evaluate the candidate programs (Default: 4).
timeout:
Returns:
pass_at_k: dict with pass rates for each k
results: dict with granular results of each unittest
Examples:
>>> code_eval = datasets.load_metric(\"code_eval\")
>>> test_cases = [\"assert add(2,3)==5\"]
>>> candidates = [[\"def add(a,b): return a*b\", \"def add(a, b): return a+b\"]]
>>> pass_at_k, results = code_eval.compute(references=test_cases, predictions=candidates, k=[1, 2])
>>> print(pass_at_k)
{'pass@1': 0.5, 'pass@2': 1.0}
"""
_UpperCAmelCase : Union[str, Any] ="""
################################################################################
!!!WARNING!!!
################################################################################
The \"code_eval\" metric executes untrusted model-generated code in Python.
Although it is highly unlikely that model-generated code will do something
overtly malicious in response to this test suite, model-generated code may act
destructively due to a lack of model capability or alignment.
Users are strongly encouraged to sandbox this evaluation suite so that it
does not perform destructive actions on their host or network. For more
information on how OpenAI sandboxes its code, see the paper \"Evaluating Large
Language Models Trained on Code\" (https://arxiv.org/abs/2107.03374).
Once you have read this disclaimer and taken appropriate precautions,
set the environment variable HF_ALLOW_CODE_EVAL=\"1\". Within Python you can do this
with:
>>> import os
>>> os.environ[\"HF_ALLOW_CODE_EVAL\"] = \"1\"
################################################################################\
"""
_LICENSE = """The MIT License
Copyright (c) OpenAI (https://openai.com)
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the \"Software\"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE."""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION )
class CodeEval(datasets.Metric):
'''simple docstring'''
    def _info(self):
return datasets.MetricInfo(
# This is the description that will appear on the metrics page.
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Sequence(datasets.Value('''string''' ) ),
'''references''': datasets.Value('''string''' ),
} ) , homepage='''https://github.com/openai/human-eval''' , codebase_urls=['''https://github.com/openai/human-eval'''] , reference_urls=['''https://github.com/openai/human-eval'''] , license=_LICENSE , )
    def _compute(self, predictions, references, k=[1, 10, 100], num_workers=4, timeout=3.0):
        if os.getenv("HF_ALLOW_CODE_EVAL", "0") != "1":
            raise ValueError(_WARNING)
        if os.name == "nt":
            raise NotImplementedError("This metric is currently not supported on Windows.")
        with ThreadPoolExecutor(max_workers=num_workers) as executor:
            futures = []
            completion_id = Counter()
            n_samples = 0
            results = defaultdict(list)
            for task_id, (candidates, test_case) in enumerate(zip(predictions, references)):
                for candidate in candidates:
                    test_program = candidate + "\n" + test_case
                    args = (test_program, timeout, task_id, completion_id[task_id])
                    future = executor.submit(check_correctness, *args)
                    futures.append(future)
                    completion_id[task_id] += 1
                    n_samples += 1
            for future in as_completed(futures):
                result = future.result()
                results[result["task_id"]].append((result["completion_id"], result))
        total, correct = [], []
        for result in results.values():
            result.sort()
            passed = [r[1]["passed"] for r in result]
            total.append(len(passed))
            correct.append(sum(passed))
        total = np.array(total)
        correct = np.array(correct)
        ks = k
        pass_at_k = {f"pass@{k}": estimate_pass_at_k(total, correct, k).mean() for k in ks if (total >= k).all()}
        return pass_at_k, results
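# Worked numeric example of the unbiased estimator defined below (illustrative
# counts, not real HumanEval results): with n = 2 generated candidates per
# problem of which c = 1 passes, pass@1 = 1 - C(1,1)/C(2,1) = 0.5 and
# pass@2 = 1.0, matching the docstring example above.
#
#     >>> estimate_pass_at_k(np.array([2]), np.array([1]), 1)
#     array([0.5])
#     >>> estimate_pass_at_k(np.array([2]), np.array([1]), 2)
#     array([1.])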
def estimate_pass_at_k(num_samples, num_correct, k):
    """Estimates pass@k of each problem and returns them in an array."""

    def estimator(n: int, c: int, k: int) -> float:
        """Calculates 1 - comb(n - c, k) / comb(n, k)."""
        if n - c < k:
            return 1.0
        return 1.0 - np.prod(1.0 - k / np.arange(n - c + 1, n + 1))

    if isinstance(num_samples, int):
        num_samples_it = itertools.repeat(num_samples, len(num_correct))
    else:
        assert len(num_samples) == len(num_correct)
        num_samples_it = iter(num_samples)

    return np.array([estimator(int(n), int(c), k) for n, c in zip(num_samples_it, num_correct)])
| 262 |
from math import sqrt
def is_prime(number: int) -> bool:
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' must be an int and positive"
    status = True
    # 0 and 1 are no primes.
    if number <= 1:
        status = False
    for divisor in range(2, int(round(sqrt(number))) + 1):
        # if 'number' is divisible by 'divisor' then set 'status'
        # to False and break out of the loop.
        if number % divisor == 0:
            status = False
            break
    # precondition
    assert isinstance(status, bool), "'status' must be from type bool"
    return status
def sieve_er(n: int) -> list:
    assert isinstance(n, int) and (n > 2), "'N' must be an int and > 2"
    # begin_list: contains all natural numbers from 2 up to N
    begin_list = list(range(2, n + 1))
    ans = []  # this list will be returned.
    # actual sieve of Eratosthenes
    for i in range(len(begin_list)):
        for j in range(i + 1, len(begin_list)):
            if (begin_list[i] != 0) and (begin_list[j] % begin_list[i] == 0):
                begin_list[j] = 0
    # filters actual prime numbers.
    ans = [x for x in begin_list if x != 0]
    # precondition
    assert isinstance(ans, list), "'ans' must be from type list"
    return ans
def get_prime_numbers(n: int) -> list:
    assert isinstance(n, int) and (n > 2), "'N' must be an int and > 2"
    ans = []
    # iterates over all numbers between 2 up to N+1
    # if a number is prime then appends to list 'ans'
    for number in range(2, n + 1):
        if is_prime(number):
            ans.append(number)
    # precondition
    assert isinstance(ans, list), "'ans' must be from type list"
    return ans
def prime_factorization(number: int) -> list:
    assert isinstance(number, int) and number >= 0, "'number' must be an int and >= 0"
    ans = []  # this list will be returned by the function.
    # potential prime number factors.
    factor = 2
    quotient = number
    if number == 0 or number == 1:
        ans.append(number)
    # if 'number' is not prime then build the prime factorization of 'number'
    elif not is_prime(number):
        while quotient != 1:
            if is_prime(factor) and (quotient % factor == 0):
                ans.append(factor)
                quotient /= factor
            else:
                factor += 1
    else:
        ans.append(number)
    # precondition
    assert isinstance(ans, list), "'ans' must be from type list"
    return ans
def greatest_prime_factor(number: int) -> int:
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' must be an int and >= 0"
    ans = 0
    # prime factorization of 'number'
    prime_factors = prime_factorization(number)
    ans = max(prime_factors)
    # precondition
    assert isinstance(ans, int), "'ans' must be from type int"
    return ans
def smallest_prime_factor(number: int) -> int:
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' must be an int and >= 0"
    ans = 0
    # prime factorization of 'number'
    prime_factors = prime_factorization(number)
    ans = min(prime_factors)
    # precondition
    assert isinstance(ans, int), "'ans' must be from type int"
    return ans
def is_even(number: int) -> bool:
    assert isinstance(number, int), "'number' must be an int"
    assert isinstance(number % 2 == 0, bool), "compare must be from type bool"
    return number % 2 == 0
def is_odd(number: int) -> bool:
    assert isinstance(number, int), "'number' must be an int"
    assert isinstance(number % 2 != 0, bool), "compare must be from type bool"
    return number % 2 != 0
def goldbach(number: int) -> list:
    assert (
        isinstance(number, int) and (number > 2) and is_even(number)
    ), "'number' must be an int, even and > 2"
    ans = []  # this list will be returned
    # creates a list of prime numbers between 2 up to 'number'
    prime_numbers = get_prime_numbers(number)
    len_pn = len(prime_numbers)
    # run variables for the while-loops.
    i = 0
    j = None
    # exit variable, for breaking out of the loops
    loop = True
    while i < len_pn and loop:
        j = i + 1
        while j < len_pn and loop:
            if prime_numbers[i] + prime_numbers[j] == number:
                loop = False
                ans.append(prime_numbers[i])
                ans.append(prime_numbers[j])
            j += 1
        i += 1
    # precondition
    assert (
        isinstance(ans, list)
        and (len(ans) == 2)
        and (ans[0] + ans[1] == number)
        and is_prime(ans[0])
        and is_prime(ans[1])
    ), "'ans' must contain two primes whose sum equals 'number'"
    return ans
def gcd(number1: int, number2: int) -> int:
    assert (
        isinstance(number1, int)
        and isinstance(number2, int)
        and (number1 >= 0)
        and (number2 >= 0)
    ), "'number1' and 'number2' must be positive integers."
    rest = 0
    while number2 != 0:
        rest = number1 % number2
        number1 = number2
        number2 = rest
    # precondition
    assert isinstance(number1, int) and (
        number1 >= 0
    ), "'number' must be from type int and positive"
    return number1
def kg_v(number1: int, number2: int) -> int:
    assert (
        isinstance(number1, int)
        and isinstance(number2, int)
        and (number1 >= 1)
        and (number2 >= 1)
    ), "'number1' and 'number2' must be positive integers."
    ans = 1  # actual answer that will be returned.
    # for kgV (x,1)
    if number1 > 1 and number2 > 1:
        # builds the prime factorization of 'number1' and 'number2'
        prime_fac_1 = prime_factorization(number1)
        prime_fac_2 = prime_factorization(number2)
    elif number1 == 1 or number2 == 1:
        prime_fac_1 = []
        prime_fac_2 = []
        ans = max(number1, number2)
    count1 = 0
    count2 = 0
    done = []  # captured numbers in both 'prime_fac_1' and 'prime_fac_2'
    # iterates through prime_fac_1
    for n in prime_fac_1:
        if n not in done:
            if n in prime_fac_2:
                count1 = prime_fac_1.count(n)
                count2 = prime_fac_2.count(n)
                for _ in range(max(count1, count2)):
                    ans *= n
            else:
                count1 = prime_fac_1.count(n)
                for _ in range(count1):
                    ans *= n
            done.append(n)
    # iterates through prime_fac_2
    for n in prime_fac_2:
        if n not in done:
            count2 = prime_fac_2.count(n)
            for _ in range(count2):
                ans *= n
            done.append(n)
    # precondition
    assert isinstance(ans, int) and (
        ans >= 0
    ), "'ans' must be from type int and positive"
    return ans
def get_prime(n: int) -> int:
    assert isinstance(n, int) and (n >= 0), "'number' must be a positive int"
    index = 0
    ans = 2  # this variable holds the answer
    while index < n:
        index += 1
        ans += 1  # counts to the next number
        # if ans is not prime then
        # run to the next prime number.
        while not is_prime(ans):
            ans += 1
    # precondition
    assert isinstance(ans, int) and is_prime(
        ans
    ), "'ans' must be a prime number and from type int"
    return ans
def get_primes_between(p_number_1: int, p_number_2: int) -> list:
    assert (
        is_prime(p_number_1) and is_prime(p_number_2) and (p_number_1 < p_number_2)
    ), "The arguments must be prime numbers and 'pNumber1' < 'pNumber2'"
    number = p_number_1 + 1  # jump to the next number
    ans = []  # this list will be returned.
    # if number is not prime then
    # fetch the next prime number.
    while not is_prime(number):
        number += 1
    while number < p_number_2:
        ans.append(number)
        number += 1
        # fetch the next prime number.
        while not is_prime(number):
            number += 1
    # precondition
    assert (
        isinstance(ans, list)
        and ans[0] != p_number_1
        and ans[len(ans) - 1] != p_number_2
    ), "'ans' must be a list without the arguments"
    # 'ans' contains neither 'pNumber1' nor 'pNumber2'!
    return ans
def get_divisors(n: int) -> list:
    assert isinstance(n, int) and (n >= 1), "'n' must be an int and >= 1"
    ans = []  # will be returned.
    for divisor in range(1, n + 1):
        if n % divisor == 0:
            ans.append(divisor)
    # precondition
    assert ans[0] == 1 and ans[len(ans) - 1] == n, "Error in function get_divisors(...)"
    return ans
def is_perfect_number(number: int) -> bool:
    assert isinstance(number, int) and (
        number > 1
    ), "'number' must be an int and > 1"
    divisors = get_divisors(number)
    # precondition
    assert (
        isinstance(divisors, list)
        and (divisors[0] == 1)
        and (divisors[len(divisors) - 1] == number)
    ), "Error in help-function get_divisors(...)"
    # sum all divisors up to 'number' (exclusive), hence [:-1]
    return sum(divisors[:-1]) == number
def simplify_fraction(numerator: int, denominator: int) -> tuple:
    assert (
        isinstance(numerator, int)
        and isinstance(denominator, int)
        and (denominator != 0)
    ), "The arguments must be from type int and 'denominator' != 0"
    # build the greatest common divisor of numerator and denominator.
    gcd_of_fraction = gcd(abs(numerator), abs(denominator))
    # precondition
    assert (
        isinstance(gcd_of_fraction, int)
        and (numerator % gcd_of_fraction == 0)
        and (denominator % gcd_of_fraction == 0)
    ), "Error in function gcd(...,...)"
    return (numerator // gcd_of_fraction, denominator // gcd_of_fraction)
def factorial(n: int) -> int:
    assert isinstance(n, int) and (n >= 0), "'n' must be an int and >= 0"
    ans = 1  # this will be returned.
    for factor in range(1, n + 1):
        ans *= factor
    return ans
def fib(n: int) -> int:
    assert isinstance(n, int) and (n >= 0), "'n' must be an int and >= 0"
    tmp = 0
    fib1 = 1
    ans = 1  # this will be returned
    for _ in range(n - 1):
        tmp = ans
        ans += fib1
        fib1 = tmp
    return ans
| 262 | 1 |
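# Quick illustrative tour of the number-theory helpers above (an added sketch;
# the small outputs below were checked by hand).
if __name__ == "__main__":
    print(is_prime(97))              # True
    print(prime_factorization(360))  # [2, 2, 2, 3, 3, 5]
    print(goldbach(28))              # [5, 23]
    print(gcd(54, 24))               # 6
    print(kg_v(4, 6))                # 12 -- the least common multiple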
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/canine-s": "https://huggingface.co/google/canine-s/resolve/main/config.json",
    # See all CANINE models at https://huggingface.co/models?filter=canine
}


class CanineConfig(PretrainedConfig):
    model_type = "canine"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=16384,
        type_vocab_size=16,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        bos_token_id=0xE000,
        eos_token_id=0xE001,
        downsampling_rate=4,
        upsampling_kernel_size=4,
        num_hash_functions=8,
        num_hash_buckets=16384,
        local_transformer_stride=128,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        # Character config:
        self.downsampling_rate = downsampling_rate
        self.upsampling_kernel_size = upsampling_kernel_size
        self.num_hash_functions = num_hash_functions
        self.num_hash_buckets = num_hash_buckets
        self.local_transformer_stride = local_transformer_stride
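# Illustrative only: instantiating the config above with its defaults and
# inspecting the character-hashing hyperparameters.
if __name__ == "__main__":
    demo_config = CanineConfig()
    print(demo_config.model_type)                # canine
    print(demo_config.num_hash_buckets)          # 16384
    print(demo_config.local_transformer_stride)  # 128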
| 361 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
"""configuration_squeezebert""": [
"""SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""SqueezeBertConfig""",
"""SqueezeBertOnnxConfig""",
],
"""tokenization_squeezebert""": ["""SqueezeBertTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_squeezebert_fast"] = ["SqueezeBertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_squeezebert"] = [
"""SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""SqueezeBertForMaskedLM""",
"""SqueezeBertForMultipleChoice""",
"""SqueezeBertForQuestionAnswering""",
"""SqueezeBertForSequenceClassification""",
"""SqueezeBertForTokenClassification""",
"""SqueezeBertModel""",
"""SqueezeBertModule""",
"""SqueezeBertPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_squeezebert import (
SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
SqueezeBertConfig,
SqueezeBertOnnxConfig,
)
from .tokenization_squeezebert import SqueezeBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_squeezebert_fast import SqueezeBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_squeezebert import (
SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
SqueezeBertModel,
SqueezeBertModule,
SqueezeBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
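# Illustrative usage of the lazily exported symbols from user code (importing
# transformers inside this module itself would be circular):
#
#     >>> from transformers import SqueezeBertConfig
#     >>> SqueezeBertConfig().model_type
#     'squeezebert'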
| 46 | 0 |
"""simple docstring"""
from collections import defaultdict
from math import ceil, sqrt
def solution(t_limit: int = 1000000, n_limit: int = 10) -> int:
    count: defaultdict = defaultdict(int)
    for outer_width in range(3, (t_limit // 4) + 2):
        if outer_width * outer_width > t_limit:
            hole_width_lower_bound = max(
                ceil(sqrt(outer_width * outer_width - t_limit)), 1
            )
        else:
            hole_width_lower_bound = 1
        hole_width_lower_bound += (outer_width - hole_width_lower_bound) % 2
        for hole_width in range(hole_width_lower_bound, outer_width - 1, 2):
            count[outer_width * outer_width - hole_width * hole_width] += 1
    return sum(1 for n in count.values() if 1 <= n <= n_limit)
if __name__ == "__main__":
print(F'''{solution() = }''')
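    # Illustrative spot check (arbitrary small bound): a square lamina with
    # outer width o and hole width h uses o*o - h*h tiles, so solution(100)
    # counts the tile budgets up to 100 achievable in 1 to 10 distinct ways.
    print(f"{solution(100) = }")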
| 106 |
from ...utils import is_torch_available, is_transformers_available
if is_transformers_available() and is_torch_available():
from .pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings, VQDiffusionPipeline
| 99 | 0 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "alibaba-damo/mgp-str-base": "https://huggingface.co/alibaba-damo/mgp-str-base/resolve/main/config.json",
}


class MgpstrConfig(PretrainedConfig):
    model_type = "mgp-str"

    def __init__(
        self,
        image_size=[32, 128],
        patch_size=4,
        num_channels=3,
        max_token_length=27,
        num_character_labels=38,
        num_bpe_labels=50257,
        num_wordpiece_labels=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        mlp_ratio=4.0,
        qkv_bias=True,
        distilled=False,
        layer_norm_eps=1e-5,
        drop_rate=0.0,
        attn_drop_rate=0.0,
        drop_path_rate=0.0,
        output_a3_attentions=False,
        initializer_range=0.02,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.max_token_length = max_token_length
        self.num_character_labels = num_character_labels
        self.num_bpe_labels = num_bpe_labels
        self.num_wordpiece_labels = num_wordpiece_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.mlp_ratio = mlp_ratio
        self.distilled = distilled
        self.layer_norm_eps = layer_norm_eps
        self.drop_rate = drop_rate
        self.qkv_bias = qkv_bias
        self.attn_drop_rate = attn_drop_rate
        self.drop_path_rate = drop_path_rate
        self.output_a3_attentions = output_a3_attentions
        self.initializer_range = initializer_range
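# Illustrative only: a default instance of the config class above.
if __name__ == "__main__":
    demo_config = MgpstrConfig()
    print(demo_config.image_size, demo_config.max_token_length)  # [32, 128] 27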
| 73 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__SCREAMING_SNAKE_CASE : int = logging.get_logger(__name__)
__SCREAMING_SNAKE_CASE : Dict = {
'''alibaba-damo/mgp-str-base''': '''https://huggingface.co/alibaba-damo/mgp-str-base/resolve/main/config.json''',
}
class lowerCamelCase_( A__ ):
'''simple docstring'''
lowercase__ : List[str] = 'mgp-str'
def __init__( self , lowerCamelCase__=[3_2, 1_2_8] , lowerCamelCase__=4 , lowerCamelCase__=3 , lowerCamelCase__=2_7 , lowerCamelCase__=3_8 , lowerCamelCase__=5_0_2_5_7 , lowerCamelCase__=3_0_5_2_2 , lowerCamelCase__=7_6_8 , lowerCamelCase__=1_2 , lowerCamelCase__=1_2 , lowerCamelCase__=4.0 , lowerCamelCase__=True , lowerCamelCase__=False , lowerCamelCase__=1e-5 , lowerCamelCase__=0.0 , lowerCamelCase__=0.0 , lowerCamelCase__=0.0 , lowerCamelCase__=False , lowerCamelCase__=0.0_2 , **lowerCamelCase__ , ):
super().__init__(**lowerCamelCase__ )
_lowerCamelCase = image_size
_lowerCamelCase = patch_size
_lowerCamelCase = num_channels
_lowerCamelCase = max_token_length
_lowerCamelCase = num_character_labels
_lowerCamelCase = num_bpe_labels
_lowerCamelCase = num_wordpiece_labels
_lowerCamelCase = hidden_size
_lowerCamelCase = num_hidden_layers
_lowerCamelCase = num_attention_heads
_lowerCamelCase = mlp_ratio
_lowerCamelCase = distilled
_lowerCamelCase = layer_norm_eps
_lowerCamelCase = drop_rate
_lowerCamelCase = qkv_bias
_lowerCamelCase = attn_drop_rate
_lowerCamelCase = drop_path_rate
_lowerCamelCase = output_aa_attentions
_lowerCamelCase = initializer_range
| 73 | 1 |
import unittest

from transformers import BarthezTokenizer, BarthezTokenizerFast, BatchEncoding
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow

from ...test_tokenization_common import TokenizerTesterMixin


@require_tokenizers
@require_sentencepiece
@slow  # see https://github.com/huggingface/transformers/issues/11457
class BarthezTokenizerTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BarthezTokenizer
    rust_tokenizer_class = BarthezTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()
        tokenizer = BarthezTokenizerFast.from_pretrained("moussaKam/mbarthez")
        tokenizer.save_pretrained(self.tmpdirname)
        tokenizer.save_pretrained(self.tmpdirname, legacy_format=False)
        self.tokenizer = tokenizer

    def test_convert_token_and_id(self):
        token = "<pad>"
        token_id = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())
        self.assertEqual(vocab_keys[0], "<s>")
        self.assertEqual(vocab_keys[1], "<pad>")
        self.assertEqual(vocab_keys[-1], "<mask>")
        self.assertEqual(len(vocab_keys), 101122)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 101122)

    @require_torch
    def test_prepare_batch(self):
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        expected_src_tokens = [0, 57, 3018, 70307, 91, 2]
        batch = self.tokenizer(
            src_text, max_length=len(expected_src_tokens), padding=True, truncation=True, return_tensors="pt"
        )
        self.assertIsInstance(batch, BatchEncoding)
        self.assertEqual((2, 6), batch.input_ids.shape)
        self.assertEqual((2, 6), batch.attention_mask.shape)
        result = batch.input_ids.tolist()[0]
        self.assertListEqual(expected_src_tokens, result)

    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()
        sequence = "I was born in 92000, and this is falsé."
        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)
        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)
        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

    @slow
    def test_tokenizer_integration(self):
        # fmt: off
        expected_encoding = {"input_ids": [[0, 490, 14328, 4507, 354, 47, 43669, 95, 25, 78117, 20215, 19779, 190, 22, 400, 4, 35343, 80310, 603, 86, 24937, 105, 33438, 94762, 196, 39642, 7, 15, 15933, 173, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 10534, 87, 25, 66, 3358, 196, 55289, 8, 82961, 81, 2204, 75203, 7, 15, 763, 12956, 216, 178, 14328, 9595, 1377, 69693, 7, 448, 71021, 196, 18106, 1437, 13974, 108, 9083, 4, 49315, 7, 39, 86, 1326, 2793, 46333, 4, 448, 196, 74588, 7, 49315, 7, 39, 21, 822, 38470, 74, 21, 66723, 62480, 8, 22050, 5, 2]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]}  # noqa: E501
        # fmt: on
        # moussaKam/mbarthez is a french model. So we also use french texts.
        sequences = [
            "Le transformeur est un modèle d'apprentissage profond introduit en 2017, "
            "utilisé principalement dans le domaine du traitement automatique des langues (TAL).",
            "À l'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus "
            "pour gérer des données séquentielles, telles que le langage naturel, pour des tâches "
            "telles que la traduction et la synthèse de texte.",
        ]
        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding,
            model_name="moussaKam/mbarthez",
            revision="c2e4ecbca5e3cd2c37fe1ac285ca4fbdf1366fb6",
            sequences=sequences,
        )
| 345 |
"""simple docstring"""
red = 0  # The first color of the flag.
white = 1  # The second color of the flag.
blue = 2  # The third color of the flag.
colors = (red, white, blue)
def dutch_national_flag_sort(sequence: list) -> list:
    if not sequence:
        return []
    if len(sequence) == 1:
        return list(sequence)
    low = 0
    high = len(sequence) - 1
    mid = 0
    while mid <= high:
        if sequence[mid] == colors[0]:
            sequence[low], sequence[mid] = sequence[mid], sequence[low]
            low += 1
            mid += 1
        elif sequence[mid] == colors[1]:
            mid += 1
        elif sequence[mid] == colors[2]:
            sequence[mid], sequence[high] = sequence[high], sequence[mid]
            high -= 1
        else:
            msg = f"The elements inside the sequence must contain only {colors} values"
            raise ValueError(msg)
    return sequence
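# Worked example (an added illustration): one linear pass partitions the three
# colors in place.
assert dutch_national_flag_sort([2, 0, 1, 1, 0, 2]) == [0, 0, 1, 1, 2, 2]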
if __name__ == "__main__":
import doctest
doctest.testmod()
    user_input = input('Enter numbers separated by commas:\n').strip()
    unsorted = [int(item.strip()) for item in user_input.split(',')]
print(f'''{dutch_national_flag_sort(unsorted)}''')
| 113 | 0 |
'''simple docstring'''
from __future__ import annotations
from statistics import mean
def calculate_waitingtime(arrival_time: list[int], burst_time: list[int], no_of_processes: int) -> list[int]:
    '''simple docstring'''
    remaining_time = [0] * no_of_processes
    waiting_time = [0] * no_of_processes
    # Initialize remaining_time from burst_time.
    for i in range(no_of_processes):
        remaining_time[i] = burst_time[i]
    ready_process: list[int] = []
    completed = 0
    total_time = 0
    # While processes are not completed:
    # a process whose arrival time has passed and which has remaining
    # execution time is put into ready_process; the shortest process in
    # ready_process, target_process, is executed to completion.
    while completed != no_of_processes:
        ready_process = []
        target_process = -1
        for i in range(no_of_processes):
            if (arrival_time[i] <= total_time) and (remaining_time[i] > 0):
                ready_process.append(i)
        if len(ready_process) > 0:
            target_process = ready_process[0]
            for i in ready_process:
                if remaining_time[i] < remaining_time[target_process]:
                    target_process = i
            total_time += burst_time[target_process]
            completed += 1
            remaining_time[target_process] = 0
            waiting_time[target_process] = (
                total_time - arrival_time[target_process] - burst_time[target_process]
            )
        else:
            total_time += 1
    return waiting_time
def calculate_turnaroundtime(burst_time: list[int], no_of_processes: int, waiting_time: list[int]) -> list[int]:
    '''simple docstring'''
    turn_around_time = [0] * no_of_processes
    for i in range(no_of_processes):
        turn_around_time[i] = burst_time[i] + waiting_time[i]
    return turn_around_time
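# Tiny illustrative run (made-up workload): job 0 arrives at t=0 with burst 3,
# job 1 at t=1 with burst 1. As implemented above, the chosen job runs to
# completion before the next pick, so the waiting times come out as [0, 2].
assert calculate_waitingtime([0, 1], [3, 1], 2) == [0, 2]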
if __name__ == "__main__":
print('[TEST CASE 01]')
    no_of_processes = 4
    burst_time = [2, 5, 3, 7]
    arrival_time = [0, 0, 0, 0]
    waiting_time = calculate_waitingtime(arrival_time, burst_time, no_of_processes)
    turn_around_time = calculate_turnaroundtime(
        burst_time, no_of_processes, waiting_time
    )
# Printing the Result
print('PID\tBurst Time\tArrival Time\tWaiting Time\tTurnaround Time')
for i, process_id in enumerate(list(range(1, 5))):
print(
F'{process_id}\t{burst_time[i]}\t\t\t{arrival_time[i]}\t\t\t\t'
F'{waiting_time[i]}\t\t\t\t{turn_around_time[i]}'
)
print(F'\nAverage waiting time = {mean(waiting_time):.5f}')
print(F'Average turnaround time = {mean(turn_around_time):.5f}') | 362 |
'''simple docstring'''
def gnome_sort(lst: list) -> list:
    '''simple docstring'''
    if len(lst) <= 1:
        return lst
    i = 1
    while i < len(lst):
        if lst[i - 1] <= lst[i]:
            i += 1
        else:
            lst[i - 1], lst[i] = lst[i], lst[i - 1]
            i -= 1
            if i == 0:
                i = 1
    return lst
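# Quick illustrative check on an arbitrary input: repeated neighbour swaps
# produce ascending order.
assert gnome_sort([34, 2, 10, -9]) == [-9, 2, 10, 34]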
if __name__ == "__main__":
    user_input = input('Enter numbers separated by a comma:\n').strip()
    unsorted = [int(item) for item in user_input.split(',')]
print(gnome_sort(unsorted)) | 8 | 0 |
'''simple docstring'''
__version__ = '0.18.2'
from .configuration_utils import ConfigMixin
from .utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_inflect_available,
is_invisible_watermark_available,
is_k_diffusion_available,
is_k_diffusion_version,
is_librosa_available,
is_note_seq_available,
is_onnx_available,
is_scipy_available,
is_torch_available,
is_torchsde_available,
is_transformers_available,
is_transformers_version,
is_unidecode_available,
logging,
)
try:
if not is_onnx_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_onnx_objects import * # noqa F403
else:
from .pipelines import OnnxRuntimeModel
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_pt_objects import * # noqa F403
else:
from .models import (
AutoencoderKL,
ControlNetModel,
ModelMixin,
PriorTransformer,
TaFilmDecoder,
TransformeraDModel,
UNetaDModel,
UNetaDConditionModel,
UNetaDModel,
UNetaDConditionModel,
VQModel,
)
from .optimization import (
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
get_scheduler,
)
from .pipelines import (
AudioPipelineOutput,
ConsistencyModelPipeline,
DanceDiffusionPipeline,
DDIMPipeline,
DDPMPipeline,
DiffusionPipeline,
DiTPipeline,
ImagePipelineOutput,
KarrasVePipeline,
LDMPipeline,
LDMSuperResolutionPipeline,
PNDMPipeline,
RePaintPipeline,
ScoreSdeVePipeline,
)
from .schedulers import (
CMStochasticIterativeScheduler,
DDIMInverseScheduler,
DDIMParallelScheduler,
DDIMScheduler,
DDPMParallelScheduler,
DDPMScheduler,
DEISMultistepScheduler,
DPMSolverMultistepInverseScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
HeunDiscreteScheduler,
IPNDMScheduler,
KarrasVeScheduler,
KDPMaAncestralDiscreteScheduler,
KDPMaDiscreteScheduler,
PNDMScheduler,
RePaintScheduler,
SchedulerMixin,
ScoreSdeVeScheduler,
UnCLIPScheduler,
UniPCMultistepScheduler,
VQDiffusionScheduler,
)
from .training_utils import EMAModel
try:
if not (is_torch_available() and is_scipy_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_scipy_objects import * # noqa F403
else:
from .schedulers import LMSDiscreteScheduler
try:
if not (is_torch_available() and is_torchsde_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_torchsde_objects import * # noqa F403
else:
from .schedulers import DPMSolverSDEScheduler
try:
if not (is_torch_available() and is_transformers_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipelines import (
AltDiffusionImgaImgPipeline,
AltDiffusionPipeline,
AudioLDMPipeline,
CycleDiffusionPipeline,
IFImgaImgPipeline,
IFImgaImgSuperResolutionPipeline,
IFInpaintingPipeline,
IFInpaintingSuperResolutionPipeline,
IFPipeline,
IFSuperResolutionPipeline,
ImageTextPipelineOutput,
KandinskyImgaImgPipeline,
KandinskyInpaintPipeline,
KandinskyPipeline,
KandinskyPriorPipeline,
KandinskyVaaControlnetImgaImgPipeline,
KandinskyVaaControlnetPipeline,
KandinskyVaaImgaImgPipeline,
KandinskyVaaInpaintPipeline,
KandinskyVaaPipeline,
KandinskyVaaPriorEmbaEmbPipeline,
KandinskyVaaPriorPipeline,
LDMTextToImagePipeline,
PaintByExamplePipeline,
SemanticStableDiffusionPipeline,
ShapEImgaImgPipeline,
ShapEPipeline,
StableDiffusionAttendAndExcitePipeline,
StableDiffusionControlNetImgaImgPipeline,
StableDiffusionControlNetInpaintPipeline,
StableDiffusionControlNetPipeline,
StableDiffusionDepthaImgPipeline,
StableDiffusionDiffEditPipeline,
StableDiffusionImageVariationPipeline,
StableDiffusionImgaImgPipeline,
StableDiffusionInpaintPipeline,
StableDiffusionInpaintPipelineLegacy,
StableDiffusionInstructPixaPixPipeline,
StableDiffusionLatentUpscalePipeline,
StableDiffusionLDMaDPipeline,
StableDiffusionModelEditingPipeline,
StableDiffusionPanoramaPipeline,
StableDiffusionParadigmsPipeline,
StableDiffusionPipeline,
StableDiffusionPipelineSafe,
StableDiffusionPixaPixZeroPipeline,
StableDiffusionSAGPipeline,
StableDiffusionUpscalePipeline,
StableUnCLIPImgaImgPipeline,
StableUnCLIPPipeline,
TextToVideoSDPipeline,
TextToVideoZeroPipeline,
UnCLIPImageVariationPipeline,
UnCLIPPipeline,
UniDiffuserModel,
UniDiffuserPipeline,
UniDiffuserTextDecoder,
VersatileDiffusionDualGuidedPipeline,
VersatileDiffusionImageVariationPipeline,
VersatileDiffusionPipeline,
VersatileDiffusionTextToImagePipeline,
VideoToVideoSDPipeline,
VQDiffusionPipeline,
)
try:
if not (is_torch_available() and is_transformers_available() and is_invisible_watermark_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_invisible_watermark_objects import * # noqa F403
else:
from .pipelines import StableDiffusionXLImgaImgPipeline, StableDiffusionXLPipeline
try:
if not (is_torch_available() and is_transformers_available() and is_k_diffusion_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_k_diffusion_objects import * # noqa F403
else:
from .pipelines import StableDiffusionKDiffusionPipeline
try:
if not (is_torch_available() and is_transformers_available() and is_onnx_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_onnx_objects import * # noqa F403
else:
from .pipelines import (
OnnxStableDiffusionImgaImgPipeline,
OnnxStableDiffusionInpaintPipeline,
OnnxStableDiffusionInpaintPipelineLegacy,
OnnxStableDiffusionPipeline,
OnnxStableDiffusionUpscalePipeline,
StableDiffusionOnnxPipeline,
)
try:
if not (is_torch_available() and is_librosa_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_librosa_objects import * # noqa F403
else:
from .pipelines import AudioDiffusionPipeline, Mel
try:
if not (is_transformers_available() and is_torch_available() and is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_transformers_and_torch_and_note_seq_objects import * # noqa F403
else:
from .pipelines import SpectrogramDiffusionPipeline
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_flax_objects import * # noqa F403
else:
from .models.controlnet_flax import FlaxControlNetModel
from .models.modeling_flax_utils import FlaxModelMixin
from .models.unet_ad_condition_flax import FlaxUNetaDConditionModel
from .models.vae_flax import FlaxAutoencoderKL
from .pipelines import FlaxDiffusionPipeline
from .schedulers import (
FlaxDDIMScheduler,
FlaxDDPMScheduler,
FlaxDPMSolverMultistepScheduler,
FlaxKarrasVeScheduler,
FlaxLMSDiscreteScheduler,
FlaxPNDMScheduler,
FlaxSchedulerMixin,
FlaxScoreSdeVeScheduler,
)
try:
if not (is_flax_available() and is_transformers_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_flax_and_transformers_objects import * # noqa F403
else:
from .pipelines import (
FlaxStableDiffusionControlNetPipeline,
FlaxStableDiffusionImgaImgPipeline,
FlaxStableDiffusionInpaintPipeline,
FlaxStableDiffusionPipeline,
)
try:
if not (is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_note_seq_objects import * # noqa F403
else:
from .pipelines import MidiProcessor
| 56 |
'''simple docstring'''
import re
from filelock import FileLock
try:
import nltk
    NLTK_AVAILABLE = True
except (ImportError, ModuleNotFoundError):
    NLTK_AVAILABLE = False
if NLTK_AVAILABLE:
with FileLock('.lock') as lock:
nltk.download('punkt', quiet=True)
def add_newline_to_end_of_each_sentence(x: str) -> str:
    '''simple docstring'''
    x = re.sub('<n>', '', x)  # remove pegasus newline char
    assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)"
    return "\n".join(nltk.sent_tokenize(x))
| 56 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

GPTSAN_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "tanreinama/GPTSAN-2.8B-spout_is_uniform": (
        "https://huggingface.co/tanreinama/GPTSAN-2.8B-spout_is_uniform/resolve/main/config.json"
    ),
}


class GPTSanJapaneseConfig(PretrainedConfig):
    model_type = "gptsan-japanese"
    keys_to_ignore_at_inference = [
        "past_key_values",
    ]
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        vocab_size=36000,
        max_position_embeddings=1280,
        d_model=1024,
        d_ff=8192,
        d_ext=4096,
        d_spout=128,
        num_switch_layers=10,
        num_ext_layers=0,
        num_heads=16,
        num_experts=16,
        expert_capacity=128,
        dropout_rate=0.0,
        layer_norm_epsilon=1e-5,
        router_bias=False,
        router_jitter_noise=0.0,
        router_dtype="float32",
        router_ignore_padding_tokens=False,
        output_hidden_states=False,
        output_attentions=False,
        initializer_factor=0.002,
        output_router_logits=False,
        use_cache=True,
        separator_token_id=35998,
        pad_token_id=35995,
        eos_token_id=35999,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.d_ff = d_ff
        self.d_ext = d_ext
        self.d_spout = d_spout
        self.num_switch_layers = num_switch_layers
        self.num_ext_layers = num_ext_layers
        self.num_layers = num_switch_layers + num_ext_layers
        self.num_heads = num_heads
        self.num_experts = num_experts
        self.expert_capacity = expert_capacity
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.router_bias = router_bias
        self.router_jitter_noise = router_jitter_noise
        self.router_dtype = router_dtype
        self.router_ignore_padding_tokens = router_ignore_padding_tokens
        self.output_hidden_states = output_hidden_states
        self.output_attentions = output_attentions
        self.initializer_factor = initializer_factor
        self.output_router_logits = output_router_logits
        self.use_cache = use_cache
        super().__init__(
            separator_token_id=separator_token_id, pad_token_id=pad_token_id, eos_token_id=eos_token_id, **kwargs
        )
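# Illustrative only: a default instance of the config above; num_layers is
# derived as num_switch_layers + num_ext_layers.
if __name__ == "__main__":
    demo_config = GPTSanJapaneseConfig()
    print(demo_config.num_layers, demo_config.d_model)  # 10 1024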
| 359 |
from typing import List, Optional, TypeVar
from .arrow_dataset import Dataset, _concatenate_map_style_datasets, _interleave_map_style_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .info import DatasetInfo
from .iterable_dataset import IterableDataset, _concatenate_iterable_datasets, _interleave_iterable_datasets
from .splits import NamedSplit
from .utils import logging
from .utils.py_utils import Literal
logger = logging.get_logger(__name__)

DatasetType = TypeVar("DatasetType", Dataset, IterableDataset)


def interleave_datasets(
    datasets: List[DatasetType],
    probabilities: Optional[List[float]] = None,
    seed: Optional[int] = None,
    info: Optional[DatasetInfo] = None,
    split: Optional[NamedSplit] = None,
    stopping_strategy: Literal["first_exhausted", "all_exhausted"] = "first_exhausted",
) -> DatasetType:
    '''simple docstring'''
    from .arrow_dataset import Dataset
    from .iterable_dataset import IterableDataset

    if not datasets:
        raise ValueError("Unable to interleave an empty list of datasets.")
    for i, dataset in enumerate(datasets):
        if not isinstance(dataset, (Dataset, IterableDataset)):
            if isinstance(dataset, (DatasetDict, IterableDatasetDict)):
                if not dataset:
                    raise ValueError(
                        f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} "
                        "is an empty dataset dictionary."
                    )
                raise ValueError(
                    f"Dataset at position {i} has at least one split: {list(dataset)}\n"
                    f"Please pick one to interleave with the other datasets, for example: dataset['{next(iter(dataset))}']"
                )
            raise ValueError(
                f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} "
                f"is a {type(dataset).__name__}."
            )
        if i == 0:
            dataset_type, other_type = (
                (Dataset, IterableDataset) if isinstance(dataset, Dataset) else (IterableDataset, Dataset)
            )
        elif not isinstance(dataset, dataset_type):
            raise ValueError(
                f"Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} "
                f"(at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects."
            )
    if stopping_strategy not in ["first_exhausted", "all_exhausted"]:
        raise ValueError(f"{stopping_strategy} is not supported. Please enter a valid stopping_strategy.")
    if dataset_type is Dataset:
        return _interleave_map_style_datasets(
            datasets, probabilities, seed, info=info, split=split, stopping_strategy=stopping_strategy
        )
    else:
        return _interleave_iterable_datasets(
            datasets, probabilities, seed, info=info, split=split, stopping_strategy=stopping_strategy
        )


def concatenate_datasets(
    dsets: List[DatasetType],
    info: Optional[DatasetInfo] = None,
    split: Optional[NamedSplit] = None,
    axis: int = 0,
) -> DatasetType:
    '''simple docstring'''
    if not dsets:
        raise ValueError("Unable to concatenate an empty list of datasets.")
    for i, dataset in enumerate(dsets):
        if not isinstance(dataset, (Dataset, IterableDataset)):
            if isinstance(dataset, (DatasetDict, IterableDatasetDict)):
                if not dataset:
                    raise ValueError(
                        f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} "
                        "is an empty dataset dictionary."
                    )
                raise ValueError(
                    f"Dataset at position {i} has at least one split: {list(dataset)}\n"
                    f"Please pick one to concatenate with the other datasets, for example: dataset['{next(iter(dataset))}']"
                )
            raise ValueError(
                f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} "
                f"is a {type(dataset).__name__}."
            )
        if i == 0:
            dataset_type, other_type = (
                (Dataset, IterableDataset) if isinstance(dataset, Dataset) else (IterableDataset, Dataset)
            )
        elif not isinstance(dataset, dataset_type):
            raise ValueError(
                f"Unable to concatenate a {dataset_type.__name__} (at position 0) with a {other_type.__name__} "
                f"(at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects."
            )
    if dataset_type is Dataset:
        return _concatenate_map_style_datasets(dsets, info=info, split=split, axis=axis)
    else:
        return _concatenate_iterable_datasets(dsets, info=info, split=split, axis=axis)
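# Minimal illustrative usage (made-up data; mirrors the library documentation):
#
#     >>> d1 = Dataset.from_dict({"a": [0, 1, 2]})
#     >>> d2 = Dataset.from_dict({"a": [10, 11, 12]})
#     >>> interleave_datasets([d1, d2])["a"]
#     [0, 10, 1, 11, 2, 12]
#     >>> concatenate_datasets([d1, d2])["a"]
#     [0, 1, 2, 10, 11, 12]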
| 193 | 0 |
"""simple docstring"""
import os
import sys
SRC_DIR = os.path.join(os.path.dirname(__file__), 'src')
sys.path.append(SRC_DIR)
from transformers import (
AutoConfig,
AutoModel,
AutoModelForCausalLM,
AutoModelForMaskedLM,
AutoModelForQuestionAnswering,
AutoModelForSequenceClassification,
AutoTokenizer,
add_start_docstrings,
)
dependencies = [
'''torch''',
'''numpy''',
'''tokenizers''',
'''filelock''',
'''requests''',
'''tqdm''',
'''regex''',
'''sentencepiece''',
'''sacremoses''',
'''importlib_metadata''',
'''huggingface_hub''',
]
@add_start_docstrings(AutoConfig.__doc__)
def config(*args, **kwargs):
    return AutoConfig.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoTokenizer.__doc__)
def tokenizer(*args, **kwargs):
    return AutoTokenizer.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModel.__doc__)
def model(*args, **kwargs):
    return AutoModel.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForCausalLM.__doc__)
def modelForCausalLM(*args, **kwargs):
    return AutoModelForCausalLM.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForMaskedLM.__doc__)
def modelForMaskedLM(*args, **kwargs):
    return AutoModelForMaskedLM.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForSequenceClassification.__doc__)
def modelForSequenceClassification(*args, **kwargs):
    return AutoModelForSequenceClassification.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForQuestionAnswering.__doc__)
def modelForQuestionAnswering(*args, **kwargs):
    return AutoModelForQuestionAnswering.from_pretrained(*args, **kwargs)
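# Illustrative torch.hub usage of the entry points above (network access
# required; the checkpoint name is only an example):
#
#     >>> import torch
#     >>> tok = torch.hub.load("huggingface/transformers", "tokenizer", "bert-base-cased")
#     >>> mdl = torch.hub.load("huggingface/transformers", "model", "bert-base-cased")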
| 246 |
"""simple docstring"""
import argparse
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from PIL import Image
from transformers import GLPNConfig, GLPNForDepthEstimation, GLPNImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def rename_keys(state_dict):
    '''simple docstring'''
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if key.startswith("module.encoder"):
            key = key.replace("module.encoder", "glpn.encoder")
        if key.startswith("module.decoder"):
            key = key.replace("module.decoder", "decoder.stages")
        if "patch_embed" in key:
            # replace for example patch_embed1 by patch_embeddings.0
            idx = key[key.find("patch_embed") + len("patch_embed")]
            key = key.replace(f"patch_embed{idx}", f"patch_embeddings.{int(idx) - 1}")
        if "norm" in key:
            key = key.replace("norm", "layer_norm")
        if "glpn.encoder.layer_norm" in key:
            # replace for example layer_norm1 by layer_norm.0
            idx = key[key.find("glpn.encoder.layer_norm") + len("glpn.encoder.layer_norm")]
            key = key.replace(f"layer_norm{idx}", f"layer_norm.{int(idx) - 1}")
        if "layer_norm1" in key:
            key = key.replace("layer_norm1", "layer_norm_1")
        if "layer_norm2" in key:
            key = key.replace("layer_norm2", "layer_norm_2")
        if "block" in key:
            # replace for example block1 by block.0
            idx = key[key.find("block") + len("block")]
            key = key.replace(f"block{idx}", f"block.{int(idx) - 1}")
        if "attn.q" in key:
            key = key.replace("attn.q", "attention.self.query")
        if "attn.proj" in key:
            key = key.replace("attn.proj", "attention.output.dense")
        if "attn" in key:
            key = key.replace("attn", "attention.self")
        if "fc1" in key:
            key = key.replace("fc1", "dense1")
        if "fc2" in key:
            key = key.replace("fc2", "dense2")
        if "linear_pred" in key:
            key = key.replace("linear_pred", "classifier")
        if "linear_fuse" in key:
            key = key.replace("linear_fuse.conv", "linear_fuse")
            key = key.replace("linear_fuse.bn", "batch_norm")
        if "linear_c" in key:
            # replace for example linear_c4 by linear_c.3
            idx = key[key.find("linear_c") + len("linear_c")]
            key = key.replace(f"linear_c{idx}", f"linear_c.{int(idx) - 1}")
        if "bot_conv" in key:
            key = key.replace("bot_conv", "0.convolution")
        if "skip_conv1" in key:
            key = key.replace("skip_conv1", "1.convolution")
        if "skip_conv2" in key:
            key = key.replace("skip_conv2", "2.convolution")
        if "fusion1" in key:
            key = key.replace("fusion1", "1.fusion")
        if "fusion2" in key:
            key = key.replace("fusion2", "2.fusion")
        if "fusion3" in key:
            key = key.replace("fusion3", "3.fusion")
        if "fusion" in key and "conv" in key:
            key = key.replace("conv", "convolutional_layer")
        if key.startswith("module.last_layer_depth"):
            key = key.replace("module.last_layer_depth", "head.head")
        new_state_dict[key] = value
    return new_state_dict
def read_in_k_v(state_dict, config):
    '''simple docstring'''
    for i in range(config.num_encoder_blocks):
        for j in range(config.depths[i]):
            # read in weights + bias of keys and values (which is a single matrix in the original implementation)
            kv_weight = state_dict.pop(f"glpn.encoder.block.{i}.{j}.attention.self.kv.weight")
            kv_bias = state_dict.pop(f"glpn.encoder.block.{i}.{j}.attention.self.kv.bias")
            # next, add keys and values (in that order) to the state dict
            state_dict[f"glpn.encoder.block.{i}.{j}.attention.self.key.weight"] = kv_weight[
                : config.hidden_sizes[i], :
            ]
            state_dict[f"glpn.encoder.block.{i}.{j}.attention.self.key.bias"] = kv_bias[: config.hidden_sizes[i]]
            state_dict[f"glpn.encoder.block.{i}.{j}.attention.self.value.weight"] = kv_weight[
                config.hidden_sizes[i] :, :
            ]
            state_dict[f"glpn.encoder.block.{i}.{j}.attention.self.value.bias"] = kv_bias[config.hidden_sizes[i] :]
def prepare_img():
    '''simple docstring'''
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url, stream=True).raw)
    return image
@torch.no_grad()
def convert_glpn_checkpoint(checkpoint_path, pytorch_dump_folder_path, push_to_hub=False, model_name=None):
    '''simple docstring'''
    config = GLPNConfig(hidden_sizes=[64, 128, 320, 512], decoder_hidden_size=64, depths=[3, 8, 27, 3])
    # load image processor (only resize + rescale)
    image_processor = GLPNImageProcessor()
    # prepare image
    image = prepare_img()
    pixel_values = image_processor(images=image, return_tensors="pt").pixel_values
    logger.info("Converting model...")
    # load original state dict
    state_dict = torch.load(checkpoint_path, map_location=torch.device("cpu"))
    # rename keys
    state_dict = rename_keys(state_dict)
    # key and value matrices need special treatment
    read_in_k_v(state_dict, config)
    # create HuggingFace model and load state dict
    model = GLPNForDepthEstimation(config)
    model.load_state_dict(state_dict)
    model.eval()
    # forward pass
    outputs = model(pixel_values)
    predicted_depth = outputs.predicted_depth
    # verify output
    if model_name is not None:
        if "nyu" in model_name:
            expected_slice = torch.tensor(
                [[4.41_47, 4.08_73, 4.06_73], [3.78_90, 3.28_81, 3.15_25], [3.76_74, 3.54_23, 3.49_13]]
            )
        elif "kitti" in model_name:
            expected_slice = torch.tensor(
                [[3.42_91, 2.78_65, 2.51_51], [3.28_41, 2.70_21, 2.35_02], [3.11_47, 2.46_25, 2.24_81]]
            )
        else:
            raise ValueError(f"Unknown model name: {model_name}")
        expected_shape = torch.Size([1, 480, 640])
        assert predicted_depth.shape == expected_shape
        assert torch.allclose(predicted_depth[0, :3, :3], expected_slice, atol=1e-4)
        print("Looks ok!")
    # finally, push to hub if required
    if push_to_hub:
        logger.info("Pushing model and image processor to the hub...")
        model.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name),
            organization="nielsr",
            commit_message="Add model",
            use_temp_dir=True,
        )
        image_processor.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name),
            organization="nielsr",
            commit_message="Add image processor",
            use_temp_dir=True,
        )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument(
        "--checkpoint_path",
        default=None,
        type=str,
        help="Path to the original PyTorch checkpoint (.pth file).",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
    )
    parser.add_argument(
        "--push_to_hub", action="store_true", help="Whether to upload the model to the HuggingFace hub."
    )
    parser.add_argument(
        "--model_name",
        default="glpn-kitti",
        type=str,
        help="Name of the model in case you're pushing to the hub.",
    )

    args = parser.parse_args()
    convert_glpn_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name)
| 46 | 0 |
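
# The key/value split performed by read_in_k_v above is easy to sanity-check on toy
# tensors. A minimal sketch with plain PyTorch and hypothetical sizes, independent of
# the GLPN classes: the key rows come first in the fused matrix, then the value rows.
import torch

hidden_size = 4
kv_weight = torch.randn(2 * hidden_size, hidden_size)  # fused (key; value) projection
kv_bias = torch.randn(2 * hidden_size)

key_weight, value_weight = kv_weight[:hidden_size, :], kv_weight[hidden_size:, :]
key_bias, value_bias = kv_bias[:hidden_size], kv_bias[hidden_size:]

# splitting and re-concatenating must reproduce the fused parameters exactly
assert torch.equal(torch.cat([key_weight, value_weight], dim=0), kv_weight)
assert torch.equal(torch.cat([key_bias, value_bias], dim=0), kv_bias)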
"""simple docstring"""
from __future__ import annotations
_SCREAMING_SNAKE_CASE : List[str] = [-1_0, -5, 0, 5, 5.1, 1_1, 1_3, 2_1, 3, 4, -2_1, -1_0, -5, -1, 0]
_SCREAMING_SNAKE_CASE : str = [-5, 0, 5, 5.1, 1_1, 1_3, 2_1, -1, 4, -1, -1_0, -5, -1, 0, -1]
def _lowerCAmelCase ( UpperCAmelCase : list[float] ):
'''simple docstring'''
UpperCamelCase__ : Union[str, Any] =[]
UpperCamelCase__ : List[Any] =len(UpperCAmelCase )
for i in range(UpperCAmelCase ):
UpperCamelCase__ : float =-1
for j in range(i + 1 , UpperCAmelCase ):
if arr[i] < arr[j]:
UpperCamelCase__ : Union[str, Any] =arr[j]
break
result.append(UpperCAmelCase )
return result
def _lowerCAmelCase ( UpperCAmelCase : list[float] ):
'''simple docstring'''
UpperCamelCase__ : Tuple =[]
for i, outer in enumerate(UpperCAmelCase ):
UpperCamelCase__ : float =-1
for inner in arr[i + 1 :]:
if outer < inner:
UpperCamelCase__ : Dict =inner
break
result.append(UpperCAmelCase )
return result
def _lowerCAmelCase ( UpperCAmelCase : list[float] ):
'''simple docstring'''
UpperCamelCase__ : Optional[int] =len(UpperCAmelCase )
UpperCamelCase__ : list[float] =[]
UpperCamelCase__ : list[float] =[-1] * arr_size
for index in reversed(range(UpperCAmelCase ) ):
if stack:
while stack[-1] <= arr[index]:
stack.pop()
if not stack:
break
if stack:
UpperCamelCase__ : Dict =stack[-1]
stack.append(arr[index] )
return result
if __name__ == "__main__":
from doctest import testmod
from timeit import timeit
testmod()
print(next_greatest_element_slow(arr))
print(next_greatest_element_fast(arr))
print(next_greatest_element(arr))
_SCREAMING_SNAKE_CASE : str = (
"""from __main__ import arr, next_greatest_element_slow, """
"""next_greatest_element_fast, next_greatest_element"""
)
print(
"""next_greatest_element_slow():""",
timeit("""next_greatest_element_slow(arr)""", setup=setup),
)
print(
"""next_greatest_element_fast():""",
timeit("""next_greatest_element_fast(arr)""", setup=setup),
)
print(
""" next_greatest_element():""",
timeit("""next_greatest_element(arr)""", setup=setup),
)
| 157 |
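
# A quick consistency probe for the three implementations above (assuming they are in
# scope): a brute-force comprehension must agree with the stack-based version on
# random input. No arithmetic is performed on the values, so exact equality is safe.
import random

def _brute_force_nge(values):
    return [next((x for x in values[i + 1 :] if x > values[i]), -1) for i in range(len(values))]

sample = [random.uniform(-25, 25) for _ in range(100)]
assert next_greatest_element(sample) == next_greatest_element_fast(sample) == _brute_force_nge(sample)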
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_SCREAMING_SNAKE_CASE : List[str] = {
"""configuration_bigbird_pegasus""": [
"""BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""BigBirdPegasusConfig""",
"""BigBirdPegasusOnnxConfig""",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE : Optional[Any] = [
"""BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""BigBirdPegasusForCausalLM""",
"""BigBirdPegasusForConditionalGeneration""",
"""BigBirdPegasusForQuestionAnswering""",
"""BigBirdPegasusForSequenceClassification""",
"""BigBirdPegasusModel""",
"""BigBirdPegasusPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_bigbird_pegasus import (
BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP,
BigBirdPegasusConfig,
BigBirdPegasusOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bigbird_pegasus import (
BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST,
BigBirdPegasusForCausalLM,
BigBirdPegasusForConditionalGeneration,
BigBirdPegasusForQuestionAnswering,
BigBirdPegasusForSequenceClassification,
BigBirdPegasusModel,
BigBirdPegasusPreTrainedModel,
)
else:
import sys
_SCREAMING_SNAKE_CASE : Optional[Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 157 | 1 |
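
# The _import_structure pattern above defers heavy submodule imports until attribute
# access. A stripped-down illustration of the same idea with importlib; this is a
# hypothetical sketch, not the actual _LazyModule implementation.
import importlib
import types


class SimpleLazyModule(types.ModuleType):
    """Resolve attributes from submodules on first access, then cache them."""

    def __init__(self, name, import_structure):
        super().__init__(name)
        # map attribute name -> submodule that defines it
        self._attr_to_module = {
            attr: module for module, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr):
        if attr not in self._attr_to_module:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        submodule = importlib.import_module(f"{self.__name__}.{self._attr_to_module[attr]}")
        value = getattr(submodule, attr)
        setattr(self, attr, value)  # cache so __getattr__ is not hit again
        return value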
from dataclasses import dataclass
from typing import Optional
import torch
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .attention import BasicTransformerBlock
from .modeling_utils import ModelMixin
@dataclass
class A_ ( SCREAMING_SNAKE_CASE ):
_UpperCAmelCase : torch.FloatTensor
class A_ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
@register_to_config
def __init__( self : List[Any] ,SCREAMING_SNAKE_CASE__ : int = 1_6 ,SCREAMING_SNAKE_CASE__ : int = 8_8 ,SCREAMING_SNAKE_CASE__ : Optional[int] = None ,SCREAMING_SNAKE_CASE__ : Optional[int] = None ,SCREAMING_SNAKE_CASE__ : int = 1 ,SCREAMING_SNAKE_CASE__ : float = 0.0 ,SCREAMING_SNAKE_CASE__ : int = 3_2 ,SCREAMING_SNAKE_CASE__ : Optional[int] = None ,SCREAMING_SNAKE_CASE__ : bool = False ,SCREAMING_SNAKE_CASE__ : Optional[int] = None ,SCREAMING_SNAKE_CASE__ : str = "geglu" ,SCREAMING_SNAKE_CASE__ : bool = True ,SCREAMING_SNAKE_CASE__ : bool = True ,):
super().__init__()
__lowerCamelCase : List[str] = num_attention_heads
__lowerCamelCase : str = attention_head_dim
__lowerCamelCase : Union[str, Any] = num_attention_heads * attention_head_dim
__lowerCamelCase : Optional[Any] = in_channels
__lowerCamelCase : Tuple = torch.nn.GroupNorm(num_groups=SCREAMING_SNAKE_CASE__ ,num_channels=SCREAMING_SNAKE_CASE__ ,eps=1E-6 ,affine=SCREAMING_SNAKE_CASE__)
__lowerCamelCase : Dict = nn.Linear(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__)
# 3. Define transformers blocks
__lowerCamelCase : List[Any] = nn.ModuleList(
[
BasicTransformerBlock(
SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,dropout=SCREAMING_SNAKE_CASE__ ,cross_attention_dim=SCREAMING_SNAKE_CASE__ ,activation_fn=SCREAMING_SNAKE_CASE__ ,attention_bias=SCREAMING_SNAKE_CASE__ ,double_self_attention=SCREAMING_SNAKE_CASE__ ,norm_elementwise_affine=SCREAMING_SNAKE_CASE__ ,)
for d in range(SCREAMING_SNAKE_CASE__)
])
__lowerCamelCase : List[str] = nn.Linear(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__)
def lowerCAmelCase ( self : List[str] ,SCREAMING_SNAKE_CASE__ : Union[str, Any] ,SCREAMING_SNAKE_CASE__ : Any=None ,SCREAMING_SNAKE_CASE__ : List[Any]=None ,SCREAMING_SNAKE_CASE__ : Union[str, Any]=None ,SCREAMING_SNAKE_CASE__ : Optional[int]=1 ,SCREAMING_SNAKE_CASE__ : str=None ,SCREAMING_SNAKE_CASE__ : bool = True ,):
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase : Any = hidden_states.shape
__lowerCamelCase : List[str] = batch_frames // num_frames
__lowerCamelCase : int = hidden_states
__lowerCamelCase : Optional[int] = hidden_states[None, :].reshape(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__)
__lowerCamelCase : int = hidden_states.permute(0 ,2 ,1 ,3 ,4)
__lowerCamelCase : Any = self.norm(SCREAMING_SNAKE_CASE__)
__lowerCamelCase : Dict = hidden_states.permute(0 ,3 ,4 ,2 ,1).reshape(batch_size * height * width ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__)
__lowerCamelCase : Dict = self.proj_in(SCREAMING_SNAKE_CASE__)
# 2. Blocks
for block in self.transformer_blocks:
__lowerCamelCase : Union[str, Any] = block(
SCREAMING_SNAKE_CASE__ ,encoder_hidden_states=SCREAMING_SNAKE_CASE__ ,timestep=SCREAMING_SNAKE_CASE__ ,cross_attention_kwargs=SCREAMING_SNAKE_CASE__ ,class_labels=SCREAMING_SNAKE_CASE__ ,)
# 3. Output
__lowerCamelCase : Optional[Any] = self.proj_out(SCREAMING_SNAKE_CASE__)
__lowerCamelCase : Tuple = (
hidden_states[None, None, :]
.reshape(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__)
.permute(0 ,3 ,4 ,1 ,2)
.contiguous()
)
__lowerCamelCase : List[str] = hidden_states.reshape(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__)
__lowerCamelCase : Dict = hidden_states + residual
if not return_dict:
return (output,)
return TransformerTemporalModelOutput(sample=SCREAMING_SNAKE_CASE__)
| 73 |
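
# The reshapes in the forward pass above are the subtle part: video latents arrive
# flattened as (batch * frames, channels, height, width) and must be regrouped so
# attention runs over the frame axis per spatial position. A toy sketch of just that
# bookkeeping, with hypothetical sizes and no attention:
import torch

batch_size, num_frames, channels, height, width = 2, 4, 8, 16, 16
hidden_states = torch.randn(batch_size * num_frames, channels, height, width)

# (b*f, c, h, w) -> (b, f, c, h, w) -> (b, c, f, h, w)
x = hidden_states[None, :].reshape(batch_size, num_frames, channels, height, width)
x = x.permute(0, 2, 1, 3, 4)

# flatten spatial positions so each pixel becomes one sequence of num_frames tokens
x = x.permute(0, 3, 4, 2, 1).reshape(batch_size * height * width, num_frames, channels)
assert x.shape == (batch_size * height * width, num_frames, channels)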
def interpolation_search(sorted_collection, item):
    """Search ``item`` in an ascending ``sorted_collection``; return its index or None."""
    left = 0
    right = len(sorted_collection) - 1

    while left <= right:
        # avoid division by zero during interpolation
        if sorted_collection[left] == sorted_collection[right]:
            if sorted_collection[left] == item:
                return left
            else:
                return None

        point = left + ((item - sorted_collection[left]) * (right - left)) // (
            sorted_collection[right] - sorted_collection[left]
        )

        # out of range check
        if point < 0 or point >= len(sorted_collection):
            return None

        current_item = sorted_collection[point]
        if current_item == item:
            return point
        else:
            if point < left:
                right = left
                left = point
            elif point > right:
                left = right
                right = point
            else:
                if item < current_item:
                    right = point - 1
                else:
                    left = point + 1
    return None


def interpolation_search_by_recursion(sorted_collection, item, left, right):
    # avoid division by zero during interpolation
    if sorted_collection[left] == sorted_collection[right]:
        if sorted_collection[left] == item:
            return left
        else:
            return None

    point = left + ((item - sorted_collection[left]) * (right - left)) // (
        sorted_collection[right] - sorted_collection[left]
    )

    # out of range check
    if point < 0 or point >= len(sorted_collection):
        return None

    if sorted_collection[point] == item:
        return point
    elif point < left:
        return interpolation_search_by_recursion(sorted_collection, item, point, left)
    elif point > right:
        return interpolation_search_by_recursion(sorted_collection, item, right, point)
    else:
        if sorted_collection[point] > item:
            return interpolation_search_by_recursion(
                sorted_collection, item, left, point - 1
            )
        else:
            return interpolation_search_by_recursion(
                sorted_collection, item, point + 1, right
            )


def __assert_sorted(collection):
    if collection != sorted(collection):
        raise ValueError("Collection must be ascending sorted")
    return True


if __name__ == "__main__":
    import sys

    collection = [10, 30, 40, 45, 50, 66, 77, 93]
    debug = 0
    if debug == 1:
        try:
            __assert_sorted(collection)
        except ValueError:
            sys.exit("Sequence must be ascending sorted to apply interpolation search")

    target = 67
    result = interpolation_search(collection, target)
    if result is not None:
        print(f"{target} found at positions: {result}")
    else:
        print("Not found")
| 73 | 1 |
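
# A worked probe of the interpolation step above: for uniformly spaced values the
# very first estimate lands exactly on the target, which is why the algorithm beats
# binary search on evenly distributed data. Standalone sketch:
sorted_collection = list(range(0, 100, 10))  # [0, 10, 20, ..., 90]
item = 70
left, right = 0, len(sorted_collection) - 1

point = left + ((item - sorted_collection[left]) * (right - left)) // (
    sorted_collection[right] - sorted_collection[left]
)
assert point == 7 and sorted_collection[point] == item  # found on the first probe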
"""simple docstring"""
import inspect
import os
import re
from transformers.configuration_utils import PretrainedConfig
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
__lowercase = """src/transformers"""
# This is to make sure the transformers module imported is the one in the repo.
__lowercase = direct_transformers_import(PATH_TO_TRANSFORMERS)
__lowercase = transformers.models.auto.configuration_auto.CONFIG_MAPPING
__lowercase = {
# used to compute the property `self.chunk_length`
"""EncodecConfig""": ["""overlap"""],
# used as `self.bert_model = BertModel(config, ...)`
"""DPRConfig""": True,
# not used in modeling files, but it's an important information
"""FSMTConfig""": ["""langs"""],
# used internally in the configuration class file
"""GPTNeoConfig""": ["""attention_types"""],
# used internally in the configuration class file
"""EsmConfig""": ["""is_folding_model"""],
# used during training (despite we don't have training script for these models yet)
"""Mask2FormerConfig""": ["""ignore_value"""],
# `ignore_value` used during training (despite we don't have training script for these models yet)
# `norm` used in conversion script (despite not using in the modeling file)
"""OneFormerConfig""": ["""ignore_value""", """norm"""],
# used during preprocessing and collation, see `collating_graphormer.py`
"""GraphormerConfig""": ["""spatial_pos_max"""],
# used internally in the configuration class file
"""T5Config""": ["""feed_forward_proj"""],
# used internally in the configuration class file
# `tokenizer_class` get default value `T5Tokenizer` intentionally
"""MT5Config""": ["""feed_forward_proj""", """tokenizer_class"""],
"""UMT5Config""": ["""feed_forward_proj""", """tokenizer_class"""],
# used internally in the configuration class file
"""LongT5Config""": ["""feed_forward_proj"""],
# used internally in the configuration class file
"""SwitchTransformersConfig""": ["""feed_forward_proj"""],
# having default values other than `1e-5` - we can't fix them without breaking
"""BioGptConfig""": ["""layer_norm_eps"""],
# having default values other than `1e-5` - we can't fix them without breaking
"""GLPNConfig""": ["""layer_norm_eps"""],
# having default values other than `1e-5` - we can't fix them without breaking
"""SegformerConfig""": ["""layer_norm_eps"""],
# having default values other than `1e-5` - we can't fix them without breaking
"""CvtConfig""": ["""layer_norm_eps"""],
# having default values other than `1e-5` - we can't fix them without breaking
"""PerceiverConfig""": ["""layer_norm_eps"""],
# used internally to calculate the feature size
"""InformerConfig""": ["""num_static_real_features""", """num_time_features"""],
# used internally to calculate the feature size
"""TimeSeriesTransformerConfig""": ["""num_static_real_features""", """num_time_features"""],
# used internally to calculate the feature size
"""AutoformerConfig""": ["""num_static_real_features""", """num_time_features"""],
# used internally to calculate `mlp_dim`
"""SamVisionConfig""": ["""mlp_ratio"""],
# For (head) training, but so far not implemented
"""ClapAudioConfig""": ["""num_classes"""],
# Not used, but providing useful information to users
"""SpeechT5HifiGanConfig""": ["""sampling_rate"""],
}
# TODO (ydshieh): Check the failing cases, try to fix them or move some cases to the above block once we are sure
SPECIAL_CASES_TO_ALLOW.update(
{
"""CLIPSegConfig""": True,
"""DeformableDetrConfig""": True,
"""DetaConfig""": True,
"""DinatConfig""": True,
"""DonutSwinConfig""": True,
"""EfficientFormerConfig""": True,
"""FSMTConfig""": True,
"""JukeboxConfig""": True,
"""LayoutLMv2Config""": True,
"""MaskFormerSwinConfig""": True,
"""MT5Config""": True,
"""NatConfig""": True,
"""OneFormerConfig""": True,
"""PerceiverConfig""": True,
"""RagConfig""": True,
"""SpeechT5Config""": True,
"""SwinConfig""": True,
"""Swin2SRConfig""": True,
"""Swinv2Config""": True,
"""SwitchTransformersConfig""": True,
"""TableTransformerConfig""": True,
"""TapasConfig""": True,
"""TransfoXLConfig""": True,
"""UniSpeechConfig""": True,
"""UniSpeechSatConfig""": True,
"""WavLMConfig""": True,
"""WhisperConfig""": True,
# TODO: @Arthur (for `alignment_head` and `alignment_layer`)
"""JukeboxPriorConfig""": True,
# TODO: @Younes (for `is_decoder`)
"""Pix2StructTextConfig""": True,
}
)
def lowercase ( A_ , A_ , A_ , A_ )-> Optional[Any]:
'''simple docstring'''
a : List[str] = False
for attribute in attributes:
for modeling_source in source_strings:
# check if we can find `config.xxx`, `getattr(config, "xxx", ...)` or `getattr(self.config, "xxx", ...)`
if (
F'''config.{attribute}''' in modeling_source
or F'''getattr(config, "{attribute}"''' in modeling_source
or F'''getattr(self.config, "{attribute}"''' in modeling_source
):
a : str = True
# Deal with multi-line cases
elif (
re.search(
RF'''getattr[ \t\v\n\r\f]*\([ \t\v\n\r\f]*(self\.)?config,[ \t\v\n\r\f]*"{attribute}"''' , A_ , )
is not None
):
a : List[Any] = True
# `SequenceSummary` is called with `SequenceSummary(config)`
elif attribute in [
"summary_type",
"summary_use_proj",
"summary_activation",
"summary_last_dropout",
"summary_proj_to_labels",
"summary_first_dropout",
]:
if "SequenceSummary" in modeling_source:
a : str = True
if attribute_used:
break
if attribute_used:
break
# common and important attributes, even if they do not always appear in the modeling files
a : Tuple = [
"bos_index",
"eos_index",
"pad_index",
"unk_index",
"mask_index",
"image_size",
"use_cache",
"out_features",
"out_indices",
]
a : str = ["encoder_no_repeat_ngram_size"]
# Special cases to be allowed
a : int = True
if not attribute_used:
a : Dict = False
for attribute in attributes:
# Allow if the default value in the configuration class is different from the one in `PretrainedConfig`
if attribute in ["is_encoder_decoder"] and default_value is True:
a : Optional[int] = True
elif attribute in ["tie_word_embeddings"] and default_value is False:
a : List[Any] = True
# Allow cases without checking the default value in the configuration class
elif attribute in attributes_to_allow + attributes_used_in_generation:
a : str = True
elif attribute.endswith("_token_id" ):
a : str = True
# configuration class specific cases
if not case_allowed:
a : Union[str, Any] = SPECIAL_CASES_TO_ALLOW.get(config_class.__name__ , [] )
a : Optional[Any] = allowed_cases is True or attribute in allowed_cases
return attribute_used or case_allowed
def lowercase ( A_ )-> Tuple:
'''simple docstring'''
a : Optional[int] = dict(inspect.signature(config_class.__init__ ).parameters )
a : Any = [x for x in list(signature.keys() ) if x not in ["self", "kwargs"]]
a : str = [signature[param].default for param in parameter_names]
# If `attribute_map` exists, an attribute can have different names to be used in the modeling files, and as long
# as one variant is used, the test should pass
a : Dict = {}
if len(config_class.attribute_map ) > 0:
a : int = {v: k for k, v in config_class.attribute_map.items()}
# Get the path to modeling source files
a : int = inspect.getsourcefile(A_ )
a : Union[str, Any] = os.path.dirname(A_ )
# Let's check against all frameworks: as long as one framework uses an attribute, we are good.
a : Optional[Any] = [os.path.join(A_ , A_ ) for fn in os.listdir(A_ ) if fn.startswith("modeling_" )]
# Get the source code strings
a : Tuple = []
for path in modeling_paths:
if os.path.isfile(A_ ):
with open(A_ ) as fp:
modeling_sources.append(fp.read() )
a : Optional[Any] = []
for config_param, default_value in zip(A_ , A_ ):
# `attributes` here is all the variant names for `config_param`
a : str = [config_param]
# some configuration classes have non-empty `attribute_map`, and both names could be used in the
# corresponding modeling files. As long as one of them appears, it is fine.
if config_param in reversed_attribute_map:
attributes.append(reversed_attribute_map[config_param] )
if not check_attribute_being_used(A_ , A_ , A_ , A_ ):
unused_attributes.append(attributes[0] )
return sorted(A_ )
def lowercase ( )-> str:
'''simple docstring'''
a : List[Any] = {}
for _config_class in list(CONFIG_MAPPING.values() ):
# Skip deprecated models
if "models.deprecated" in _config_class.__module__:
continue
# Some config classes are not in `CONFIG_MAPPING` (e.g. `CLIPVisionConfig`, `Blip2VisionConfig`, etc.)
a : str = [
cls
for name, cls in inspect.getmembers(
inspect.getmodule(_config_class ) , lambda A_ : inspect.isclass(A_ )
and issubclass(A_ , A_ )
and inspect.getmodule(A_ ) == inspect.getmodule(_config_class ) , )
]
for config_class in config_classes_in_module:
a : Union[str, Any] = check_config_attributes_being_used(A_ )
if len(A_ ) > 0:
a : Dict = unused_attributes
if len(A_ ) > 0:
a : Union[str, Any] = "The following configuration classes contain unused attributes in the corresponding modeling files:\n"
for name, attributes in configs_with_unused_attributes.items():
error += F'''{name}: {attributes}\n'''
raise ValueError(A_ )
if __name__ == "__main__":
check_config_attributes()
| 366 |
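
# The multi-line getattr regex above is worth exercising in isolation. A minimal
# sketch with a toy source string spread over several lines, as it would appear in a
# modeling file:
import re

attribute = "hidden_size"
modeling_source = 'size = getattr(\n    self.config,\n    "hidden_size",\n)'

pattern = rf'getattr[ \t\v\n\r\f]*\([ \t\v\n\r\f]*(self\.)?config,[ \t\v\n\r\f]*"{attribute}"'
assert re.search(pattern, modeling_source) is not None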
"""simple docstring"""
def lowercase ( A_ , A_ )-> float:
'''simple docstring'''
def get_matched_characters(A_ , A_ ) -> str:
a : Optional[int] = []
a : List[Any] = min(len(_stra ) , len(_stra ) ) // 2
for i, l in enumerate(_stra ):
a : int = int(max(0 , i - limit ) )
a : Optional[int] = int(min(i + limit + 1 , len(_stra ) ) )
if l in _stra[left:right]:
matched.append(A_ )
a : int = F'''{_stra[0:_stra.index(A_ )]} {_stra[_stra.index(A_ ) + 1:]}'''
return "".join(A_ )
# matching characters
a : Tuple = get_matched_characters(A_ , A_ )
a : str = get_matched_characters(A_ , A_ )
a : List[str] = len(A_ )
# transposition
a : Union[str, Any] = (
len([(ca, ca) for ca, ca in zip(A_ , A_ ) if ca != ca] ) // 2
)
if not match_count:
a : Tuple = 0.0
else:
a : List[str] = (
1
/ 3
* (
match_count / len(A_ )
+ match_count / len(A_ )
+ (match_count - transpositions) / match_count
)
)
# common prefix up to 4 characters
a : Union[str, Any] = 0
for ca, ca in zip(stra[:4] , stra[:4] ):
if ca == ca:
prefix_len += 1
else:
break
return jaro + 0.1 * prefix_len * (1 - jaro)
if __name__ == "__main__":
import doctest
doctest.testmod()
print(jaro_winkler("""hello""", """world"""))
| 226 | 0 |
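
# Two boundary cases pin down the formula above (assuming jaro_winkler from the
# module above is in scope): identical strings score exactly 1.0, and the classic
# "martha"/"marhta" pair lands between 0.9 and 1.0 thanks to the shared prefix bonus.
assert jaro_winkler("martha", "martha") == 1.0
assert 0.9 < jaro_winkler("martha", "marhta") < 1.0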
'''simple docstring'''
import math


def main() -> None:
    message = input("Enter message: ")
    key = int(input(f"Enter key [2-{len(message) - 1}]: "))
    mode = input("Encryption/Decryption [e/d]: ")

    if mode.lower().startswith("e"):
        text = encrypt_message(key, message)
    elif mode.lower().startswith("d"):
        text = decrypt_message(key, message)

    # Append pipe symbol (vertical bar) to identify spaces at the end.
    print(f"Output:\n{text + '|'}")


def encrypt_message(key: int, message: str) -> str:
    cipher_text = [""] * key
    for col in range(key):
        pointer = col
        while pointer < len(message):
            cipher_text[col] += message[pointer]
            pointer += key
    return "".join(cipher_text)


def decrypt_message(key: int, message: str) -> str:
    num_cols = math.ceil(len(message) / key)
    num_rows = key
    num_shaded_boxes = (num_cols * num_rows) - len(message)
    plain_text = [""] * num_cols
    col = 0
    row = 0

    for symbol in message:
        plain_text[col] += symbol
        col += 1

        if (
            (col == num_cols)
            or (col == num_cols - 1)
            and (row >= num_rows - num_shaded_boxes)
        ):
            col = 0
            row += 1

    return "".join(plain_text)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
| 79 |
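
# A round trip is the natural test for the pair of functions above (assuming
# encrypt_message and decrypt_message are in scope): decrypting an encryption must
# recover the original message for every valid key.
message = "Common sense is not so common."
for key in range(2, len(message)):
    assert decrypt_message(key, encrypt_message(key, message)) == message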
import gc
import unittest
from parameterized import parameterized
from diffusers import FlaxUNet2DConditionModel
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import load_hf_numpy, require_flax, slow
if is_flax_available():
import jax
import jax.numpy as jnp
@slow
@require_flax
class snake_case_ ( unittest.TestCase ):
'''simple docstring'''
    def get_file_format(self, seed, shape):
        return f"gaussian_noise_s={seed}_shape={'_'.join([str(s) for s in shape])}.npy"

    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()

    def get_latents(self, seed=0, shape=(4, 4, 64, 64), fpaa=False):
        dtype = jnp.bfloat16 if fpaa else jnp.float32
        image = jnp.array(load_hf_numpy(self.get_file_format(seed, shape)), dtype=dtype)
        return image

    def get_unet_model(self, fpaa=False, model_id="CompVis/stable-diffusion-v1-4"):
        dtype = jnp.bfloat16 if fpaa else jnp.float32
        revision = "bf16" if fpaa else None
        model, params = FlaxUNet2DConditionModel.from_pretrained(
            model_id, subfolder="unet", dtype=dtype, revision=revision
        )
        return model, params

    def get_encoder_hidden_states(self, seed=0, shape=(4, 77, 768), fpaa=False):
        dtype = jnp.bfloat16 if fpaa else jnp.float32
        hidden_states = jnp.array(load_hf_numpy(self.get_file_format(seed, shape)), dtype=dtype)
        return hidden_states
@parameterized.expand(
[
# fmt: off
[8_3, 4, [-0.2323, -0.1304, 0.0813, -0.3093, -0.0919, -0.1571, -0.1125, -0.5806]],
[1_7, 0.55, [-0.0831, -0.2443, 0.0901, -0.0919, 0.3396, 0.0103, -0.3743, 0.0701]],
[8, 0.89, [-0.4863, 0.0859, 0.0875, -0.1658, 0.9199, -0.0114, 0.4839, 0.4639]],
[3, 1_0_0_0, [-0.5649, 0.2402, -0.5518, 0.1248, 1.1328, -0.2443, -0.0325, -1.0078]],
# fmt: on
] )
def snake_case__( self : Optional[Any] , _UpperCamelCase : List[Any] , _UpperCamelCase : List[str] , _UpperCamelCase : Optional[int] ) ->Union[str, Any]:
snake_case_, snake_case_ = self.get_unet_model(model_id='''CompVis/stable-diffusion-v1-4''' , fpaa=_UpperCamelCase )
snake_case_ = self.get_latents(_UpperCamelCase , fpaa=_UpperCamelCase )
snake_case_ = self.get_encoder_hidden_states(_UpperCamelCase , fpaa=_UpperCamelCase )
snake_case_ = model.apply(
{'''params''': params} , _UpperCamelCase , jnp.array(_UpperCamelCase , dtype=jnp.intaa ) , encoder_hidden_states=_UpperCamelCase , ).sample
assert sample.shape == latents.shape
snake_case_ = jnp.asarray(jax.device_get((sample[-1, -2:, -2:, :2].flatten()) ) , dtype=jnp.floataa )
snake_case_ = jnp.array(_UpperCamelCase , dtype=jnp.floataa )
# Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, in the same hardware
assert jnp.allclose(_UpperCamelCase , _UpperCamelCase , atol=1e-2 )
@parameterized.expand(
[
# fmt: off
[8_3, 4, [0.1514, 0.0807, 0.1624, 0.1016, -0.1896, 0.0263, 0.0677, 0.2310]],
[1_7, 0.55, [0.1164, -0.0216, 0.0170, 0.1589, -0.3120, 0.1005, -0.0581, -0.1458]],
[8, 0.89, [-0.1758, -0.0169, 0.1004, -0.1411, 0.1312, 0.1103, -0.1996, 0.2139]],
[3, 1_0_0_0, [0.1214, 0.0352, -0.0731, -0.1562, -0.0994, -0.0906, -0.2340, -0.0539]],
# fmt: on
] )
def snake_case__( self : Optional[int] , _UpperCamelCase : Optional[Any] , _UpperCamelCase : Union[str, Any] , _UpperCamelCase : str ) ->Dict:
snake_case_, snake_case_ = self.get_unet_model(model_id='''stabilityai/stable-diffusion-2''' , fpaa=_UpperCamelCase )
snake_case_ = self.get_latents(_UpperCamelCase , shape=(4, 4, 9_6, 9_6) , fpaa=_UpperCamelCase )
snake_case_ = self.get_encoder_hidden_states(_UpperCamelCase , shape=(4, 7_7, 1_0_2_4) , fpaa=_UpperCamelCase )
snake_case_ = model.apply(
{'''params''': params} , _UpperCamelCase , jnp.array(_UpperCamelCase , dtype=jnp.intaa ) , encoder_hidden_states=_UpperCamelCase , ).sample
assert sample.shape == latents.shape
snake_case_ = jnp.asarray(jax.device_get((sample[-1, -2:, -2:, :2].flatten()) ) , dtype=jnp.floataa )
snake_case_ = jnp.array(_UpperCamelCase , dtype=jnp.floataa )
# Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, on the same hardware
        assert jnp.allclose(_UpperCamelCase , _UpperCamelCase , atol=1e-2 )
| 8 | 0 |
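
# The atol=1e-2 tolerance used in the tests above reflects the coarse mantissa of
# bfloat16 (about 8 significant bits). A quick sketch of why that bound is safe,
# assuming jax is installed:
import jax.numpy as jnp

x = jnp.float32(1.2345678)
roundtrip = jnp.float32(jnp.bfloat16(x))  # cast down to bfloat16 and back
assert jnp.allclose(x, roundtrip, atol=1e-2)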
'''simple docstring'''
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available


_import_structure = {
    "configuration_bloom": ["BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP", "BloomConfig", "BloomOnnxConfig"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_bloom_fast"] = ["BloomTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_bloom"] = [
        "BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST",
        "BloomForCausalLM",
        "BloomModel",
        "BloomPreTrainedModel",
        "BloomForSequenceClassification",
        "BloomForTokenClassification",
        "BloomForQuestionAnswering",
    ]

if TYPE_CHECKING:
    from .configuration_bloom import BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP, BloomConfig, BloomOnnxConfig

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_bloom_fast import BloomTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_bloom import (
            BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST,
            BloomForCausalLM,
            BloomForQuestionAnswering,
            BloomForSequenceClassification,
            BloomForTokenClassification,
            BloomModel,
            BloomPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 214 |
'''simple docstring'''
import pytest

import datasets.config
from datasets.utils.info_utils import is_small_dataset


@pytest.mark.parametrize("dataset_size", [None, 400 * 2**20, 600 * 2**20])
@pytest.mark.parametrize("input_in_memory_max_size", ["default", 0, 100 * 2**20, 900 * 2**20])
def test_is_small_dataset(dataset_size, input_in_memory_max_size, monkeypatch):
    if input_in_memory_max_size != "default":
        monkeypatch.setattr(datasets.config, "IN_MEMORY_MAX_SIZE", input_in_memory_max_size)
    in_memory_max_size = datasets.config.IN_MEMORY_MAX_SIZE
    if input_in_memory_max_size == "default":
        assert in_memory_max_size == 0
    else:
        assert in_memory_max_size == input_in_memory_max_size
    if dataset_size and in_memory_max_size:
        expected = dataset_size < in_memory_max_size
    else:
        expected = False
    result = is_small_dataset(dataset_size)
    assert result == expected
| 214 | 1 |
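
# The monkeypatch fixture used above restores every patched attribute automatically
# when the test ends, so global config is never leaked between tests. A minimal
# standalone illustration (run under pytest):
import pytest


class Config:
    IN_MEMORY_MAX_SIZE = 0


def test_patch_is_scoped(monkeypatch):
    monkeypatch.setattr(Config, "IN_MEMORY_MAX_SIZE", 100)
    assert Config.IN_MEMORY_MAX_SIZE == 100  # reverted to 0 once this test returns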
'''simple docstring'''
import copy
from dataclasses import dataclass
from pathlib import Path
from typing import Dict, Optional, Union
@dataclass
class UpperCAmelCase :
__lowercase = None
__lowercase = False
__lowercase = False
__lowercase = False
__lowercase = None
__lowercase = None
__lowercase = False
__lowercase = False
__lowercase = False
__lowercase = True
__lowercase = None
__lowercase = 1
__lowercase = None
__lowercase = False
__lowercase = None
__lowercase = None
def UpperCAmelCase_ ( self :Tuple )-> int:
        return self.__class__(**{k: copy.deepcopy(v) for k, v in self.__dict__.items()})
| 237 |
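
# The copy helper above clones a config by deep-copying __dict__ into the
# constructor. A sketch with a hypothetical two-field dataclass showing why the
# deepcopy matters for mutable fields:
import copy
from dataclasses import dataclass, field


@dataclass
class Cfg:
    depth: int = 1
    tags: list = field(default_factory=list)

    def clone(self):
        return self.__class__(**{k: copy.deepcopy(v) for k, v in self.__dict__.items()})


a = Cfg(tags=["x"])
b = a.clone()
b.tags.append("y")
assert a.tags == ["x"]  # the clone's list is independent of the original's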
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionTextToImagePipeline
from diffusers.utils.testing_utils import nightly, require_torch_gpu, torch_device
a__: List[str] = False
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
pass
@nightly
@require_torch_gpu
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
def UpperCamelCase ( self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCamelCase ( self ):
A__ = VersatileDiffusionTextToImagePipeline.from_pretrained('''shi-labs/versatile-diffusion''' )
# remove text_unet
pipe.remove_unused_weights()
pipe.to(__lowerCamelCase )
pipe.set_progress_bar_config(disable=__lowerCamelCase )
A__ = '''A painting of a squirrel eating a burger '''
A__ = torch.manual_seed(0 )
A__ = pipe(
prompt=__lowerCamelCase,generator=__lowerCamelCase,guidance_scale=7.5,num_inference_steps=2,output_type='''numpy''' ).images
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(__lowerCamelCase )
A__ = VersatileDiffusionTextToImagePipeline.from_pretrained(__lowerCamelCase )
pipe.to(__lowerCamelCase )
pipe.set_progress_bar_config(disable=__lowerCamelCase )
A__ = generator.manual_seed(0 )
A__ = pipe(
prompt=__lowerCamelCase,generator=__lowerCamelCase,guidance_scale=7.5,num_inference_steps=2,output_type='''numpy''' ).images
assert np.abs(image - new_image ).sum() < 1E-5, "Models don't have the same forward pass"
def UpperCamelCase ( self ):
A__ = VersatileDiffusionTextToImagePipeline.from_pretrained(
'''shi-labs/versatile-diffusion''',torch_dtype=torch.floataa )
pipe.to(__lowerCamelCase )
pipe.set_progress_bar_config(disable=__lowerCamelCase )
A__ = '''A painting of a squirrel eating a burger '''
A__ = torch.manual_seed(0 )
A__ = pipe(
prompt=__lowerCamelCase,generator=__lowerCamelCase,guidance_scale=7.5,num_inference_steps=50,output_type='''numpy''' ).images
A__ = image[0, 253:256, 253:256, -1]
assert image.shape == (1, 512, 512, 3)
A__ = np.array([0.3367, 0.3169, 0.2656, 0.3870, 0.4790, 0.3796, 0.4009, 0.4878, 0.4778] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
| 193 | 0 |
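
# The save/reload equality check above hinges on seeding: reusing the same seed
# reproduces identical noise, so two pipelines can be compared bit-for-bit. A
# standalone sketch of that determinism:
import torch

g = torch.manual_seed(0)
first = torch.randn(4, generator=g)

g = torch.manual_seed(0)
second = torch.randn(4, generator=g)

assert torch.equal(first, second)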
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
A = logging.get_logger(__name__)
A = {
"facebook/convnextv2-tiny-1k-224": "https://huggingface.co/facebook/convnextv2-tiny-1k-224/resolve/main/config.json",
}
class __lowercase ( lowerCamelCase__ , lowerCamelCase__ ):
__lowerCAmelCase = '''convnextv2'''
def __init__( self , _UpperCAmelCase=3 , _UpperCAmelCase=4 , _UpperCAmelCase=4 , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase="gelu" , _UpperCAmelCase=0.0_2 , _UpperCAmelCase=1e-1_2 , _UpperCAmelCase=0.0 , _UpperCAmelCase=224 , _UpperCAmelCase=None , _UpperCAmelCase=None , **_UpperCAmelCase , ):
super().__init__(**__A )
__a : int = num_channels
__a : Union[str, Any] = patch_size
__a : Optional[int] = num_stages
__a : Optional[Any] = [96, 192, 384, 768] if hidden_sizes is None else hidden_sizes
__a : Dict = [3, 3, 9, 3] if depths is None else depths
__a : Tuple = hidden_act
__a : Union[str, Any] = initializer_range
__a : Dict = layer_norm_eps
__a : Union[str, Any] = drop_path_rate
__a : Union[str, Any] = image_size
__a : int = ['''stem'''] + [f"""stage{idx}""" for idx in range(1 , len(self.depths ) + 1 )]
__a : List[Any] = get_aligned_output_features_output_indices(
out_features=__A , out_indices=__A , stage_names=self.stage_names ) | 361 |
"""simple docstring"""
def __A ( a_ :float) -> float:
if edge <= 0 or not isinstance(a_ , a_):
raise ValueError('''Length must be a positive.''')
return 3 * ((25 + 10 * (5 ** (1 / 2))) ** (1 / 2)) * (edge**2)
def __A ( a_ :float) -> float:
if edge <= 0 or not isinstance(a_ , a_):
raise ValueError('''Length must be a positive.''')
return ((15 + (7 * (5 ** (1 / 2)))) / 4) * (edge**3)
if __name__ == "__main__":
import doctest
doctest.testmod() | 188 | 0 |
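
# Sanity-checking the closed forms above for a unit edge (assuming the two functions
# are in scope): surface area 3*sqrt(25 + 10*sqrt(5)) and volume (15 + 7*sqrt(5)) / 4.
import math

assert math.isclose(dodecahedron_surface_area(1), 20.645728807, rel_tol=1e-9)
assert math.isclose(dodecahedron_volume(1), 7.663118961, rel_tol=1e-9)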
import argparse
import os
import jax as jnp
import numpy as onp
import torch
import torch.nn as nn
from music_spectrogram_diffusion import inference
from tax import checkpoints
from diffusers import DDPMScheduler, OnnxRuntimeModel, SpectrogramDiffusionPipeline
from diffusers.pipelines.spectrogram_diffusion import SpectrogramContEncoder, SpectrogramNotesEncoder, TaFilmDecoder
_snake_case = '''base_with_context'''
def _UpperCamelCase ( snake_case__, snake_case__ ) -> Dict:
__UpperCAmelCase : int = nn.Parameter(torch.FloatTensor(weights["token_embedder"]["embedding"] ) )
__UpperCAmelCase : List[str] = nn.Parameter(
torch.FloatTensor(weights["Embed_0"]["embedding"] ), requires_grad=snake_case__ )
for lyr_num, lyr in enumerate(model.encoders ):
__UpperCAmelCase : Dict = weights[f'''layers_{lyr_num}''']
__UpperCAmelCase : Dict = nn.Parameter(
torch.FloatTensor(ly_weight["pre_attention_layer_norm"]["scale"] ) )
__UpperCAmelCase : Optional[int] = ly_weight["attention"]
__UpperCAmelCase : Any = nn.Parameter(torch.FloatTensor(attention_weights["query"]["kernel"].T ) )
__UpperCAmelCase : Tuple = nn.Parameter(torch.FloatTensor(attention_weights["key"]["kernel"].T ) )
__UpperCAmelCase : Union[str, Any] = nn.Parameter(torch.FloatTensor(attention_weights["value"]["kernel"].T ) )
__UpperCAmelCase : Optional[int] = nn.Parameter(torch.FloatTensor(attention_weights["out"]["kernel"].T ) )
__UpperCAmelCase : Optional[int] = nn.Parameter(torch.FloatTensor(ly_weight["pre_mlp_layer_norm"]["scale"] ) )
__UpperCAmelCase : Dict = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_0"]["kernel"].T ) )
__UpperCAmelCase : Dict = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_1"]["kernel"].T ) )
__UpperCAmelCase : Any = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wo"]["kernel"].T ) )
__UpperCAmelCase : str = nn.Parameter(torch.FloatTensor(weights["encoder_norm"]["scale"] ) )
return model
def _UpperCamelCase ( snake_case__, snake_case__ ) -> Union[str, Any]:
__UpperCAmelCase : List[Any] = nn.Parameter(torch.FloatTensor(weights["input_proj"]["kernel"].T ) )
__UpperCAmelCase : List[str] = nn.Parameter(
torch.FloatTensor(weights["Embed_0"]["embedding"] ), requires_grad=snake_case__ )
for lyr_num, lyr in enumerate(model.encoders ):
__UpperCAmelCase : str = weights[f'''layers_{lyr_num}''']
__UpperCAmelCase : Optional[Any] = ly_weight["attention"]
__UpperCAmelCase : List[Any] = nn.Parameter(torch.FloatTensor(attention_weights["query"]["kernel"].T ) )
__UpperCAmelCase : Dict = nn.Parameter(torch.FloatTensor(attention_weights["key"]["kernel"].T ) )
__UpperCAmelCase : str = nn.Parameter(torch.FloatTensor(attention_weights["value"]["kernel"].T ) )
__UpperCAmelCase : Optional[int] = nn.Parameter(torch.FloatTensor(attention_weights["out"]["kernel"].T ) )
__UpperCAmelCase : Any = nn.Parameter(
torch.FloatTensor(ly_weight["pre_attention_layer_norm"]["scale"] ) )
__UpperCAmelCase : int = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_0"]["kernel"].T ) )
__UpperCAmelCase : List[Any] = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_1"]["kernel"].T ) )
__UpperCAmelCase : Tuple = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wo"]["kernel"].T ) )
__UpperCAmelCase : int = nn.Parameter(torch.FloatTensor(ly_weight["pre_mlp_layer_norm"]["scale"] ) )
__UpperCAmelCase : str = nn.Parameter(torch.FloatTensor(weights["encoder_norm"]["scale"] ) )
return model
def _UpperCamelCase ( snake_case__, snake_case__ ) -> str:
__UpperCAmelCase : Optional[Any] = nn.Parameter(torch.FloatTensor(weights["time_emb_dense0"]["kernel"].T ) )
__UpperCAmelCase : Any = nn.Parameter(torch.FloatTensor(weights["time_emb_dense1"]["kernel"].T ) )
__UpperCAmelCase : int = nn.Parameter(
torch.FloatTensor(weights["Embed_0"]["embedding"] ), requires_grad=snake_case__ )
__UpperCAmelCase : int = nn.Parameter(
torch.FloatTensor(weights["continuous_inputs_projection"]["kernel"].T ) )
for lyr_num, lyr in enumerate(model.decoders ):
__UpperCAmelCase : int = weights[f'''layers_{lyr_num}''']
__UpperCAmelCase : int = nn.Parameter(
torch.FloatTensor(ly_weight["pre_self_attention_layer_norm"]["scale"] ) )
__UpperCAmelCase : Any = nn.Parameter(
torch.FloatTensor(ly_weight["FiLMLayer_0"]["DenseGeneral_0"]["kernel"].T ) )
__UpperCAmelCase : List[str] = ly_weight["self_attention"]
__UpperCAmelCase : str = nn.Parameter(torch.FloatTensor(attention_weights["query"]["kernel"].T ) )
__UpperCAmelCase : Optional[int] = nn.Parameter(torch.FloatTensor(attention_weights["key"]["kernel"].T ) )
__UpperCAmelCase : Optional[int] = nn.Parameter(torch.FloatTensor(attention_weights["value"]["kernel"].T ) )
__UpperCAmelCase : Dict = nn.Parameter(torch.FloatTensor(attention_weights["out"]["kernel"].T ) )
__UpperCAmelCase : Dict = ly_weight["MultiHeadDotProductAttention_0"]
__UpperCAmelCase : List[Any] = nn.Parameter(torch.FloatTensor(attention_weights["query"]["kernel"].T ) )
__UpperCAmelCase : Dict = nn.Parameter(torch.FloatTensor(attention_weights["key"]["kernel"].T ) )
__UpperCAmelCase : Optional[Any] = nn.Parameter(torch.FloatTensor(attention_weights["value"]["kernel"].T ) )
__UpperCAmelCase : Optional[int] = nn.Parameter(torch.FloatTensor(attention_weights["out"]["kernel"].T ) )
__UpperCAmelCase : List[Any] = nn.Parameter(
torch.FloatTensor(ly_weight["pre_cross_attention_layer_norm"]["scale"] ) )
__UpperCAmelCase : Optional[Any] = nn.Parameter(torch.FloatTensor(ly_weight["pre_mlp_layer_norm"]["scale"] ) )
__UpperCAmelCase : Dict = nn.Parameter(
torch.FloatTensor(ly_weight["FiLMLayer_1"]["DenseGeneral_0"]["kernel"].T ) )
__UpperCAmelCase : List[str] = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_0"]["kernel"].T ) )
__UpperCAmelCase : int = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_1"]["kernel"].T ) )
__UpperCAmelCase : Tuple = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wo"]["kernel"].T ) )
__UpperCAmelCase : int = nn.Parameter(torch.FloatTensor(weights["decoder_norm"]["scale"] ) )
__UpperCAmelCase : Tuple = nn.Parameter(torch.FloatTensor(weights["spec_out_dense"]["kernel"].T ) )
return model
def _UpperCamelCase ( snake_case__ ) -> int:
__UpperCAmelCase : Union[str, Any] = checkpoints.load_tax_checkpoint(args.checkpoint_path )
__UpperCAmelCase : str = jnp.tree_util.tree_map(onp.array, snake_case__ )
__UpperCAmelCase : Optional[int] = [
"from __gin__ import dynamic_registration",
"from music_spectrogram_diffusion.models.diffusion import diffusion_utils",
"diffusion_utils.ClassifierFreeGuidanceConfig.eval_condition_weight = 2.0",
"diffusion_utils.DiffusionConfig.classifier_free_guidance = @diffusion_utils.ClassifierFreeGuidanceConfig()",
]
__UpperCAmelCase : List[Any] = os.path.join(args.checkpoint_path, "..", "config.gin" )
__UpperCAmelCase : Tuple = inference.parse_training_gin_file(snake_case__, snake_case__ )
__UpperCAmelCase : Optional[int] = inference.InferenceModel(args.checkpoint_path, snake_case__ )
__UpperCAmelCase : str = DDPMScheduler(beta_schedule="squaredcos_cap_v2", variance_type="fixed_large" )
__UpperCAmelCase : Optional[int] = SpectrogramNotesEncoder(
max_length=synth_model.sequence_length["inputs"], vocab_size=synth_model.model.module.config.vocab_size, d_model=synth_model.model.module.config.emb_dim, dropout_rate=synth_model.model.module.config.dropout_rate, num_layers=synth_model.model.module.config.num_encoder_layers, num_heads=synth_model.model.module.config.num_heads, d_kv=synth_model.model.module.config.head_dim, d_ff=synth_model.model.module.config.mlp_dim, feed_forward_proj="gated-gelu", )
__UpperCAmelCase : Tuple = SpectrogramContEncoder(
input_dims=synth_model.audio_codec.n_dims, targets_context_length=synth_model.sequence_length["targets_context"], d_model=synth_model.model.module.config.emb_dim, dropout_rate=synth_model.model.module.config.dropout_rate, num_layers=synth_model.model.module.config.num_encoder_layers, num_heads=synth_model.model.module.config.num_heads, d_kv=synth_model.model.module.config.head_dim, d_ff=synth_model.model.module.config.mlp_dim, feed_forward_proj="gated-gelu", )
__UpperCAmelCase : str = TaFilmDecoder(
input_dims=synth_model.audio_codec.n_dims, targets_length=synth_model.sequence_length["targets_context"], max_decoder_noise_time=synth_model.model.module.config.max_decoder_noise_time, d_model=synth_model.model.module.config.emb_dim, num_layers=synth_model.model.module.config.num_decoder_layers, num_heads=synth_model.model.module.config.num_heads, d_kv=synth_model.model.module.config.head_dim, d_ff=synth_model.model.module.config.mlp_dim, dropout_rate=synth_model.model.module.config.dropout_rate, )
__UpperCAmelCase : Union[str, Any] = load_notes_encoder(ta_checkpoint["target"]["token_encoder"], snake_case__ )
__UpperCAmelCase : List[str] = load_continuous_encoder(ta_checkpoint["target"]["continuous_encoder"], snake_case__ )
__UpperCAmelCase : Dict = load_decoder(ta_checkpoint["target"]["decoder"], snake_case__ )
__UpperCAmelCase : Tuple = OnnxRuntimeModel.from_pretrained("kashif/soundstream_mel_decoder" )
__UpperCAmelCase : Union[str, Any] = SpectrogramDiffusionPipeline(
notes_encoder=snake_case__, continuous_encoder=snake_case__, decoder=snake_case__, scheduler=snake_case__, melgan=snake_case__, )
if args.save:
pipe.save_pretrained(args.output_path )
if __name__ == "__main__":
_snake_case = argparse.ArgumentParser()
parser.add_argument('''--output_path''', default=None, type=str, required=True, help='''Path to the converted model.''')
parser.add_argument(
'''--save''', default=True, type=bool, required=False, help='''Whether to save the converted model or not.'''
)
parser.add_argument(
'''--checkpoint_path''',
default=F'{MODEL}/checkpoint_500000',
type=str,
required=False,
help='''Path to the original jax model checkpoint.''',
)
_snake_case = parser.parse_args()
main(args)
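
# Every dense layer above is loaded with a transpose (kernel.T) because Flax/T5X
# stores kernels as (in_features, out_features) while torch.nn.Linear.weight is
# (out_features, in_features). A standalone sketch of the equivalence:
import numpy as np
import torch

flax_kernel = np.random.randn(3, 5).astype(np.float32)  # (in_features, out_features)
linear = torch.nn.Linear(3, 5, bias=False)
linear.weight = torch.nn.Parameter(torch.from_numpy(flax_kernel.T.copy()))

x = torch.randn(2, 3)
expected = x.numpy() @ flax_kernel  # Flax applies the kernel without transposing
assert np.allclose(linear(x).detach().numpy(), expected, atol=1e-5)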
| 157 |
from __future__ import annotations
import unittest
from transformers import MobileBertConfig, is_tf_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_MODEL_FOR_PRETRAINING_MAPPING,
TFMobileBertForMaskedLM,
TFMobileBertForMultipleChoice,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertModel,
)
@require_tf
class _snake_case ( _lowercase , _lowercase , unittest.TestCase ):
lowerCamelCase__: str = (
(
TFMobileBertModel,
TFMobileBertForMaskedLM,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertForMultipleChoice,
)
if is_tf_available()
else ()
)
lowerCamelCase__: Optional[Any] = (
{
"feature-extraction": TFMobileBertModel,
"fill-mask": TFMobileBertForMaskedLM,
"question-answering": TFMobileBertForQuestionAnswering,
"text-classification": TFMobileBertForSequenceClassification,
"token-classification": TFMobileBertForTokenClassification,
"zero-shot": TFMobileBertForSequenceClassification,
}
if is_tf_available()
else {}
)
lowerCamelCase__: Union[str, Any] = False
lowerCamelCase__: Any = False
def _lowerCamelCase ( self: Tuple , __lowerCamelCase: List[Any] , __lowerCamelCase: Any , __lowerCamelCase: List[str]=False ) -> Dict:
__UpperCAmelCase : Dict = super()._prepare_for_class(__lowerCamelCase , __lowerCamelCase , return_labels=__lowerCamelCase )
if return_labels:
if model_class in get_values(__lowerCamelCase ):
__UpperCAmelCase : List[str] = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa )
return inputs_dict
class _snake_case ( _lowercase ):
def __init__( self: str , __lowerCamelCase: Optional[int] , __lowerCamelCase: str=13 , __lowerCamelCase: Any=7 , __lowerCamelCase: int=True , __lowerCamelCase: List[Any]=True , __lowerCamelCase: Any=True , __lowerCamelCase: Optional[Any]=True , __lowerCamelCase: Tuple=99 , __lowerCamelCase: str=32 , __lowerCamelCase: Union[str, Any]=32 , __lowerCamelCase: Dict=2 , __lowerCamelCase: Dict=4 , __lowerCamelCase: Optional[int]=37 , __lowerCamelCase: Optional[int]="gelu" , __lowerCamelCase: Tuple=0.1 , __lowerCamelCase: Optional[int]=0.1 , __lowerCamelCase: int=5_12 , __lowerCamelCase: Optional[int]=16 , __lowerCamelCase: Dict=2 , __lowerCamelCase: List[Any]=0.02 , __lowerCamelCase: List[str]=3 , __lowerCamelCase: List[Any]=4 , __lowerCamelCase: Union[str, Any]=None , ) -> Optional[int]:
__UpperCAmelCase : str = parent
__UpperCAmelCase : Optional[int] = batch_size
__UpperCAmelCase : Any = seq_length
__UpperCAmelCase : Dict = is_training
__UpperCAmelCase : str = use_input_mask
__UpperCAmelCase : Optional[int] = use_token_type_ids
__UpperCAmelCase : Dict = use_labels
__UpperCAmelCase : int = vocab_size
__UpperCAmelCase : Union[str, Any] = hidden_size
__UpperCAmelCase : int = num_hidden_layers
__UpperCAmelCase : Optional[Any] = num_attention_heads
__UpperCAmelCase : Tuple = intermediate_size
__UpperCAmelCase : List[Any] = hidden_act
__UpperCAmelCase : Optional[Any] = hidden_dropout_prob
__UpperCAmelCase : int = attention_probs_dropout_prob
__UpperCAmelCase : Tuple = max_position_embeddings
__UpperCAmelCase : List[str] = type_vocab_size
__UpperCAmelCase : Optional[Any] = type_sequence_label_size
__UpperCAmelCase : str = initializer_range
__UpperCAmelCase : int = num_labels
__UpperCAmelCase : Optional[Any] = num_choices
__UpperCAmelCase : Optional[int] = scope
__UpperCAmelCase : List[str] = embedding_size
def _lowerCamelCase ( self: Tuple ) -> Optional[Any]:
__UpperCAmelCase : str = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__UpperCAmelCase : Union[str, Any] = None
if self.use_input_mask:
__UpperCAmelCase : List[Any] = random_attention_mask([self.batch_size, self.seq_length] )
__UpperCAmelCase : Tuple = None
if self.use_token_type_ids:
__UpperCAmelCase : str = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__UpperCAmelCase : Optional[int] = None
__UpperCAmelCase : Tuple = None
__UpperCAmelCase : Any = None
if self.use_labels:
__UpperCAmelCase : Tuple = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__UpperCAmelCase : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__UpperCAmelCase : Tuple = ids_tensor([self.batch_size] , self.num_choices )
__UpperCAmelCase : Dict = MobileBertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , embedding_size=self.embedding_size , )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def _lowerCamelCase ( self: Tuple , __lowerCamelCase: List[str] , __lowerCamelCase: Union[str, Any] , __lowerCamelCase: Dict , __lowerCamelCase: List[Any] , __lowerCamelCase: Union[str, Any] , __lowerCamelCase: Any , __lowerCamelCase: Optional[Any] ) -> Optional[int]:
__UpperCAmelCase : Any = TFMobileBertModel(config=__lowerCamelCase )
__UpperCAmelCase : Optional[Any] = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
__UpperCAmelCase : Tuple = model(__lowerCamelCase )
__UpperCAmelCase : Optional[int] = [input_ids, input_mask]
__UpperCAmelCase : List[str] = model(__lowerCamelCase )
__UpperCAmelCase : Optional[int] = model(__lowerCamelCase )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def _lowerCamelCase ( self: Optional[Any] , __lowerCamelCase: Any , __lowerCamelCase: List[Any] , __lowerCamelCase: List[Any] , __lowerCamelCase: Optional[int] , __lowerCamelCase: Optional[Any] , __lowerCamelCase: Optional[Any] , __lowerCamelCase: Dict ) -> Optional[int]:
__UpperCAmelCase : List[str] = TFMobileBertForMaskedLM(config=__lowerCamelCase )
__UpperCAmelCase : Tuple = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
__UpperCAmelCase : Tuple = model(__lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _lowerCamelCase ( self: Tuple , __lowerCamelCase: str , __lowerCamelCase: Dict , __lowerCamelCase: List[str] , __lowerCamelCase: List[str] , __lowerCamelCase: Optional[int] , __lowerCamelCase: Tuple , __lowerCamelCase: Union[str, Any] ) -> Any:
__UpperCAmelCase : Optional[int] = TFMobileBertForNextSentencePrediction(config=__lowerCamelCase )
__UpperCAmelCase : List[Any] = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
__UpperCAmelCase : str = model(__lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) )
def _lowerCamelCase ( self: List[Any] , __lowerCamelCase: Optional[int] , __lowerCamelCase: Dict , __lowerCamelCase: Dict , __lowerCamelCase: Any , __lowerCamelCase: List[Any] , __lowerCamelCase: Any , __lowerCamelCase: Any ) -> List[str]:
__UpperCAmelCase : Optional[Any] = TFMobileBertForPreTraining(config=__lowerCamelCase )
__UpperCAmelCase : List[Any] = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
__UpperCAmelCase : List[str] = model(__lowerCamelCase )
self.parent.assertEqual(
result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) )
def _lowerCamelCase ( self: Optional[int] , __lowerCamelCase: Dict , __lowerCamelCase: List[Any] , __lowerCamelCase: List[str] , __lowerCamelCase: int , __lowerCamelCase: List[str] , __lowerCamelCase: Any , __lowerCamelCase: Dict ) -> Dict:
__UpperCAmelCase : Tuple = self.num_labels
__UpperCAmelCase : Tuple = TFMobileBertForSequenceClassification(config=__lowerCamelCase )
__UpperCAmelCase : Optional[int] = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
__UpperCAmelCase : Optional[int] = model(__lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _lowerCamelCase ( self: Optional[int] , __lowerCamelCase: Dict , __lowerCamelCase: str , __lowerCamelCase: List[str] , __lowerCamelCase: Any , __lowerCamelCase: str , __lowerCamelCase: List[str] , __lowerCamelCase: Union[str, Any] ) -> Optional[int]:
__UpperCAmelCase : Union[str, Any] = self.num_choices
__UpperCAmelCase : Tuple = TFMobileBertForMultipleChoice(config=__lowerCamelCase )
__UpperCAmelCase : Dict = tf.tile(tf.expand_dims(__lowerCamelCase , 1 ) , (1, self.num_choices, 1) )
__UpperCAmelCase : str = tf.tile(tf.expand_dims(__lowerCamelCase , 1 ) , (1, self.num_choices, 1) )
__UpperCAmelCase : Optional[Any] = tf.tile(tf.expand_dims(__lowerCamelCase , 1 ) , (1, self.num_choices, 1) )
__UpperCAmelCase : Any = {
"input_ids": multiple_choice_inputs_ids,
"attention_mask": multiple_choice_input_mask,
"token_type_ids": multiple_choice_token_type_ids,
}
__UpperCAmelCase : Dict = model(__lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def _lowerCamelCase ( self: Optional[Any] , __lowerCamelCase: List[Any] , __lowerCamelCase: Optional[int] , __lowerCamelCase: str , __lowerCamelCase: Tuple , __lowerCamelCase: Dict , __lowerCamelCase: str , __lowerCamelCase: Optional[int] ) -> Dict:
__UpperCAmelCase : List[Any] = self.num_labels
__UpperCAmelCase : Optional[int] = TFMobileBertForTokenClassification(config=__lowerCamelCase )
__UpperCAmelCase : Dict = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
__UpperCAmelCase : Optional[Any] = model(__lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def _lowerCamelCase ( self: int , __lowerCamelCase: Optional[int] , __lowerCamelCase: int , __lowerCamelCase: List[str] , __lowerCamelCase: Any , __lowerCamelCase: Optional[Any] , __lowerCamelCase: Dict , __lowerCamelCase: int ) -> Tuple:
__UpperCAmelCase : Tuple = TFMobileBertForQuestionAnswering(config=__lowerCamelCase )
__UpperCAmelCase : Dict = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
__UpperCAmelCase : str = model(__lowerCamelCase )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def _lowerCamelCase ( self: Tuple ) -> Optional[Any]:
__UpperCAmelCase : Tuple = self.prepare_config_and_inputs()
(
(
__UpperCAmelCase
) , (
__UpperCAmelCase
) , (
__UpperCAmelCase
) , (
__UpperCAmelCase
) , (
__UpperCAmelCase
) , (
__UpperCAmelCase
) , (
__UpperCAmelCase
) ,
) : Any = config_and_inputs
__UpperCAmelCase : List[str] = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
return config, inputs_dict
def _lowerCamelCase ( self: List[str] ) -> int:
__UpperCAmelCase : List[str] = TFMobileBertModelTest.TFMobileBertModelTester(self )
__UpperCAmelCase : Optional[int] = ConfigTester(self , config_class=__lowerCamelCase , hidden_size=37 )
def _lowerCamelCase ( self: Any ) -> Optional[Any]:
self.config_tester.run_common_tests()
def _lowerCamelCase ( self: int ) -> int:
__UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_model(*__lowerCamelCase )
def _lowerCamelCase ( self: int ) -> List[str]:
__UpperCAmelCase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_masked_lm(*__lowerCamelCase )
def _lowerCamelCase ( self: Optional[Any] ) -> Optional[Any]:
__UpperCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_multiple_choice(*__lowerCamelCase )
def _lowerCamelCase ( self: List[Any] ) -> List[Any]:
__UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*__lowerCamelCase )
def _lowerCamelCase ( self: Tuple ) -> Any:
__UpperCAmelCase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_pretraining(*__lowerCamelCase )
def _lowerCamelCase ( self: Optional[Any] ) -> Any:
__UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_question_answering(*__lowerCamelCase )
def _lowerCamelCase ( self: str ) -> str:
__UpperCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_sequence_classification(*__lowerCamelCase )
def _lowerCamelCase ( self: Union[str, Any] ) -> str:
__UpperCAmelCase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_token_classification(*__lowerCamelCase )
@slow
def _lowerCamelCase ( self: List[Any] ) -> Union[str, Any]:
# for model_name in TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
for model_name in ["google/mobilebert-uncased"]:
__UpperCAmelCase : Dict = TFMobileBertModel.from_pretrained(__lowerCamelCase )
self.assertIsNotNone(__lowerCamelCase )
@require_tf
class _snake_case ( unittest.TestCase ):
@slow
def _lowerCamelCase ( self: Union[str, Any] ) -> str:
__UpperCAmelCase : Any = TFMobileBertForPreTraining.from_pretrained("google/mobilebert-uncased" )
__UpperCAmelCase : Union[str, Any] = tf.constant([[0, 1, 2, 3, 4, 5]] )
__UpperCAmelCase : str = model(__lowerCamelCase )[0]
__UpperCAmelCase : Any = [1, 6, 3_05_22]
self.assertEqual(output.shape , __lowerCamelCase )
__UpperCAmelCase : str = tf.constant(
[
[
[-4.5_91_95_47, -9.24_82_95, -9.64_52_56],
[-6.7_30_61_75, -6.44_02_84, -6.6_05_28_37],
[-7.2_74_35_06, -6.7_84_79_15, -6.02_46_73],
]
] )
tf.debugging.assert_near(output[:, :3, :3] , __lowerCamelCase , atol=1e-4 )
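
# Added sketch (not part of the original test): a rough NumPy analogue of the
# tf.debugging.assert_near check above, to make the atol semantics explicit.
# tf.debugging.assert_near also applies a dtype-dependent rtol by default;
# this helper only models the absolute-tolerance part.
def _near(actual, expected, atol=1e-4):
    import numpy as np

    return bool(np.max(np.abs(np.asarray(actual) - np.asarray(expected))) < atol)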
| 157 | 1 |
"""simple docstring"""
from __future__ import annotations
import matplotlib.pyplot as plt # type: ignore
import numpy
# initial triangle of Koch snowflake
VECTOR_1 = numpy.array([0, 0])
VECTOR_2 = numpy.array([0.5, 0.8660254])
VECTOR_3 = numpy.array([1, 0])
INITIAL_VECTORS = [VECTOR_1, VECTOR_2, VECTOR_3, VECTOR_1]


def iterate(initial_vectors: list[numpy.ndarray], steps: int) -> list[numpy.ndarray]:
    """Apply the Koch iteration `steps` times to the given polyline."""
    vectors = initial_vectors
    for _ in range(steps):
        vectors = iteration_step(vectors)
    return vectors


def iteration_step(vectors: list[numpy.ndarray]) -> list[numpy.ndarray]:
    """Replace each segment with the four segments of the Koch construction."""
    new_vectors = []
    for i, start_vector in enumerate(vectors[:-1]):
        end_vector = vectors[i + 1]
        new_vectors.append(start_vector)
        difference_vector = end_vector - start_vector
        new_vectors.append(start_vector + difference_vector / 3)
        new_vectors.append(
            start_vector + difference_vector / 3 + rotate(difference_vector / 3, 60)
        )
        new_vectors.append(start_vector + difference_vector * 2 / 3)
    new_vectors.append(vectors[-1])
    return new_vectors


def rotate(vector: numpy.ndarray, angle_in_degrees: float) -> numpy.ndarray:
    """Rotate a 2-D vector counterclockwise by the given angle."""
    theta = numpy.radians(angle_in_degrees)
    c, s = numpy.cos(theta), numpy.sin(theta)
    rotation_matrix = numpy.array(((c, -s), (s, c)))
    return numpy.dot(rotation_matrix, vector)


def plot(vectors: list[numpy.ndarray]) -> None:
    axes = plt.gca()
    axes.set_aspect("equal")
    # matplotlib.pyplot.plot takes a list of all x-coordinates and a list of all
    # y-coordinates as inputs, which are constructed from the vector-list using
    # zip()
    x_coordinates, y_coordinates = zip(*vectors)
    plt.plot(x_coordinates, y_coordinates)
    plt.show()


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    processed_vectors = iterate(INITIAL_VECTORS, 5)
    plot(processed_vectors)
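    # Added check (not in the original): rotate() applies a standard 2-D
    # rotation matrix, so turning (1, 0) by 90 degrees must give (0, 1).
    assert numpy.allclose(rotate(numpy.array([1, 0]), 90), numpy.array([0, 1]))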
| 168 |
"""simple docstring"""
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch
import math
from typing import Union
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import randn_tensor
from .scheduling_utils import SchedulerMixin
class ScoreSdeVpScheduler(SchedulerMixin, ConfigMixin):
    """Variance-preserving (VP) SDE scheduler, after score_sde_pytorch."""

    order = 1

    @register_to_config
    def __init__(self, num_train_timesteps=2_000, beta_min=0.1, beta_max=20, sampling_eps=1e-3):
        self.sigmas = None
        self.discrete_sigmas = None
        self.timesteps = None

    def set_timesteps(self, num_inference_steps, device=None):
        self.timesteps = torch.linspace(1, self.config.sampling_eps, num_inference_steps, device=device)

    def step_pred(self, score, x, t, generator=None):
        if self.timesteps is None:
            raise ValueError(
                "`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler"
            )

        # TODO(Patrick) better comments + non-PyTorch
        # postprocess model score
        log_mean_coeff = (
            -0.25 * t**2 * (self.config.beta_max - self.config.beta_min) - 0.5 * t * self.config.beta_min
        )
        std = torch.sqrt(1.0 - torch.exp(2.0 * log_mean_coeff))
        std = std.flatten()
        while len(std.shape) < len(score.shape):
            std = std.unsqueeze(-1)
        score = -score / std

        # compute
        dt = -1.0 / len(self.timesteps)

        beta_t = self.config.beta_min + t * (self.config.beta_max - self.config.beta_min)
        beta_t = beta_t.flatten()
        while len(beta_t.shape) < len(x.shape):
            beta_t = beta_t.unsqueeze(-1)
        drift = -0.5 * beta_t * x

        diffusion = torch.sqrt(beta_t)
        drift = drift - diffusion**2 * score
        x_mean = x + drift * dt

        # add noise
        noise = randn_tensor(x.shape, layout=x.layout, generator=generator, device=x.device, dtype=x.dtype)
        x = x_mean + diffusion * math.sqrt(-dt) * noise

        return x, x_mean
def __len__( self ):
"""simple docstring"""
return self.config.num_train_timesteps
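
# Added illustration (a scalar toy, not the scheduler's API): step_pred above is
# an Euler-Maruyama update of the reverse-time VP-SDE,
#     x_{t+dt} = x + [f(x, t) - g(t)^2 * score] * dt + g(t) * sqrt(-dt) * z,
# with drift f(x, t) = -0.5 * beta_t * x and diffusion g(t) = sqrt(beta_t).
def _euler_maruyama_scalar(x, score, beta_t, dt, noise):
    drift = -0.5 * beta_t * x - beta_t * score  # f(x, t) - g(t)^2 * score
    x_mean = x + drift * dt
    return x_mean + (beta_t ** 0.5) * abs(dt) ** 0.5 * noise, x_mean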
| 168 | 1 |
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.local_sgd import LocalSGD
########################################################################
# This is a fully working simple example to use Accelerate
# with LocalSGD, which is a method to synchronize model
# parameters every K batches. It is different, but complementary
# to gradient accumulation.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
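# Conceptual sketch of the LocalSGD synchronization step (an illustration of
# the idea, not accelerate's implementation): each worker takes K ordinary
# optimizer steps on its own replica, then parameters are averaged across
# workers, instead of all-reducing gradients on every step.
def _local_sgd_sync(model, step, local_sgd_steps, world_size):
    if (step + 1) % local_sgd_steps != 0:
        return
    for param in model.parameters():
        torch.distributed.all_reduce(param.data)  # sum the parameter across workers
        param.data /= world_size  # ... and turn the sum into an average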
__A : int = 16
__A : Dict = 32
def __SCREAMING_SNAKE_CASE ( UpperCamelCase__ , UpperCamelCase__ = 16 ) -> Dict:
'''simple docstring'''
UpperCAmelCase = AutoTokenizer.from_pretrained('''bert-base-cased''' )
UpperCAmelCase = load_dataset('''glue''' , '''mrpc''' )
def tokenize_function(UpperCamelCase__ ):
# max_length=None => use the model max length (it's actually the default)
UpperCAmelCase = tokenizer(examples['''sentence1'''] , examples['''sentence2'''] , truncation=_UpperCAmelCase , max_length=_UpperCAmelCase )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
UpperCAmelCase = datasets.map(
_UpperCAmelCase , batched=_UpperCAmelCase , remove_columns=['''idx''', '''sentence1''', '''sentence2'''] , )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
UpperCAmelCase = tokenized_datasets.rename_column('''label''' , '''labels''' )
def collate_fn(UpperCamelCase__ ):
# On TPU it's best to pad everything to the same length or training will be very slow.
UpperCAmelCase = 128 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
UpperCAmelCase = 16
elif accelerator.mixed_precision != "no":
UpperCAmelCase = 8
else:
UpperCAmelCase = None
return tokenizer.pad(
_UpperCAmelCase , padding='''longest''' , max_length=_UpperCAmelCase , pad_to_multiple_of=_UpperCAmelCase , return_tensors='''pt''' , )
# Instantiate dataloaders.
UpperCAmelCase = DataLoader(
tokenized_datasets['''train'''] , shuffle=_UpperCAmelCase , collate_fn=_UpperCAmelCase , batch_size=_UpperCAmelCase )
UpperCAmelCase = DataLoader(
tokenized_datasets['''validation'''] , shuffle=_UpperCAmelCase , collate_fn=_UpperCAmelCase , batch_size=_UpperCAmelCase )
return train_dataloader, eval_dataloader
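
# Added illustration (not used by the script): the effect of
# tokenizer.pad(..., pad_to_multiple_of=8) on a sequence length -- rounding up
# to the next multiple keeps fp16 tensor shapes tensor-core friendly.
def _round_up_to_multiple(length, multiple):
    return ((length + multiple - 1) // multiple) * multiple


assert _round_up_to_multiple(13, 8) == 16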
# For testing only
if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
__A : List[str] = mocked_dataloaders # noqa: F811
def __SCREAMING_SNAKE_CASE ( UpperCamelCase__ , UpperCamelCase__ ) -> str:
'''simple docstring'''
if os.environ.get('''TESTING_MOCKED_DATALOADERS''' , _UpperCAmelCase ) == "1":
UpperCAmelCase = 2
# New Code #
UpperCAmelCase = int(args.gradient_accumulation_steps )
UpperCAmelCase = int(args.local_sgd_steps )
# Initialize accelerator
UpperCAmelCase = Accelerator(
cpu=args.cpu , mixed_precision=args.mixed_precision , gradient_accumulation_steps=_UpperCAmelCase )
if accelerator.distributed_type not in [DistributedType.NO, DistributedType.MULTI_CPU, DistributedType.MULTI_GPU]:
raise NotImplementedError('''LocalSGD is supported only for CPUs and GPUs (no DeepSpeed or MegatronLM)''' )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
UpperCAmelCase = config['''lr''']
UpperCAmelCase = int(config['''num_epochs'''] )
UpperCAmelCase = int(config['''seed'''] )
UpperCAmelCase = int(config['''batch_size'''] )
UpperCAmelCase = evaluate.load('''glue''' , '''mrpc''' )
set_seed(_UpperCAmelCase )
UpperCAmelCase = get_dataloaders(_UpperCAmelCase , _UpperCAmelCase )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
UpperCAmelCase = AutoModelForSequenceClassification.from_pretrained('''bert-base-cased''' , return_dict=_UpperCAmelCase )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
UpperCAmelCase = model.to(accelerator.device )
# Instantiate optimizer
UpperCAmelCase = AdamW(params=model.parameters() , lr=_UpperCAmelCase )
# Instantiate scheduler
UpperCAmelCase = get_linear_schedule_with_warmup(
optimizer=_UpperCAmelCase , num_warmup_steps=100 , num_training_steps=(len(_UpperCAmelCase ) * num_epochs) , )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
UpperCAmelCase = accelerator.prepare(
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
# Now we train the model
for epoch in range(_UpperCAmelCase ):
model.train()
with LocalSGD(
accelerator=_UpperCAmelCase , model=_UpperCAmelCase , local_sgd_steps=_UpperCAmelCase , enabled=local_sgd_steps is not None ) as local_sgd:
for step, batch in enumerate(_UpperCAmelCase ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
# New code #
# We use the new `accumulate` context manager to perform gradient accumulation
# We also currently do not support TPUs nor advise it as bugs were found on the XLA side when running our tests.
with accelerator.accumulate(_UpperCAmelCase ):
UpperCAmelCase = model(**_UpperCAmelCase )
UpperCAmelCase = output.loss
accelerator.backward(_UpperCAmelCase )
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
# LocalSGD-specific line
local_sgd.step()
model.eval()
for step, batch in enumerate(_UpperCAmelCase ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
UpperCAmelCase = model(**_UpperCAmelCase )
UpperCAmelCase = outputs.logits.argmax(dim=-1 )
UpperCAmelCase = accelerator.gather_for_metrics((predictions, batch['''labels''']) )
metric.add_batch(
predictions=_UpperCAmelCase , references=_UpperCAmelCase , )
UpperCAmelCase = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(F"""epoch {epoch}:""" , _UpperCAmelCase )
def __SCREAMING_SNAKE_CASE ( ) -> Union[str, Any]:
'''simple docstring'''
UpperCAmelCase = argparse.ArgumentParser(description='''Simple example of training script.''' )
parser.add_argument(
'''--mixed_precision''' , type=_UpperCAmelCase , default=_UpperCAmelCase , choices=['''no''', '''fp16''', '''bf16''', '''fp8'''] , help='''Whether to use mixed precision. Choose'''
'''between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.'''
'''and an Nvidia Ampere GPU.''' , )
# New Code #
parser.add_argument(
'''--gradient_accumulation_steps''' , type=_UpperCAmelCase , default=1 , help='''The number of minibatches to be ran before gradients are accumulated.''' , )
parser.add_argument(
'''--local_sgd_steps''' , type=_UpperCAmelCase , default=8 , help='''Number of local SGD steps or None to disable local SGD''' )
parser.add_argument('''--cpu''' , action='''store_true''' , help='''If passed, will train on the CPU.''' )
UpperCAmelCase = parser.parse_args()
UpperCAmelCase = {'''lr''': 2E-5, '''num_epochs''': 3, '''seed''': 42, '''batch_size''': 16}
training_function(_UpperCAmelCase , _UpperCAmelCase )
if __name__ == "__main__":
main()
| 273 |
import json
import os
import tempfile
from transformers.testing_utils import check_json_file_has_correct_format
class FeatureExtractionSavingTestMixin:
    feature_extraction_class = None

    def test_feat_extract_to_json_string(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        obj = json.loads(feat_extract.to_json_string())
        for key, value in self.feat_extract_dict.items():
            self.assertEqual(obj[key], value)

    def test_feat_extract_to_json_file(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)
        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname, "feat_extract.json")
            feat_extract_first.to_json_file(json_file_path)
            feat_extract_second = self.feature_extraction_class.from_json_file(json_file_path)
        self.assertEqual(feat_extract_second.to_dict(), feat_extract_first.to_dict())

    def test_feat_extract_from_and_save_pretrained(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)
        with tempfile.TemporaryDirectory() as tmpdirname:
            saved_file = feat_extract_first.save_pretrained(tmpdirname)[0]
            check_json_file_has_correct_format(saved_file)
            feat_extract_second = self.feature_extraction_class.from_pretrained(tmpdirname)
        self.assertEqual(feat_extract_second.to_dict(), feat_extract_first.to_dict())

    def test_init_without_params(self):
        feat_extract = self.feature_extraction_class()
        self.assertIsNotNone(feat_extract)
| 226 | 0 |
'''simple docstring'''
from operator import delitem, getitem, setitem
import pytest
from data_structures.hashing.hash_map import HashMap
def _get(k):
    return getitem, k


def _set(k, v):
    return setitem, k, v


def _del(k):
    return delitem, k


def _run_operation(obj, fun, *args):
    try:
        return fun(obj, *args), None
    except Exception as e:
        return None, e
_add_items = (
_set("""key_a""", """val_a"""),
_set("""key_b""", """val_b"""),
)
_overwrite_items = [
_set("""key_a""", """val_a"""),
_set("""key_a""", """val_b"""),
]
_delete_items = [
_set("""key_a""", """val_a"""),
_set("""key_b""", """val_b"""),
_del("""key_a"""),
_del("""key_b"""),
_set("""key_a""", """val_a"""),
_del("""key_a"""),
]
_access_absent_items = [
_get("""key_a"""),
_del("""key_a"""),
_set("""key_a""", """val_a"""),
_del("""key_a"""),
_del("""key_a"""),
_get("""key_a"""),
]
_add_with_resize_up = [
*[_set(x, x) for x in range(5)], # guaranteed upsize
]
_add_with_resize_down = [
*[_set(x, x) for x in range(5)], # guaranteed upsize
*[_del(x) for x in range(5)],
_set("""key_a""", """val_b"""),
]
@pytest.mark.parametrize(
'operations' , (
pytest.param(_add_items , id='add items' ),
pytest.param(_overwrite_items , id='overwrite items' ),
pytest.param(_delete_items , id='delete items' ),
pytest.param(_access_absent_items , id='access absent items' ),
pytest.param(_add_with_resize_up , id='add with resize up' ),
pytest.param(_add_with_resize_down , id='add with resize down' ),
) , )
def test_hash_map_is_the_same_as_dict(operations):
    my = HashMap(initial_block_size=4)
    py = {}
    for _, (fun, *args) in enumerate(operations):
        my_res, my_exc = _run_operation(my, fun, *args)
        py_res, py_exc = _run_operation(py, fun, *args)
        assert my_res == py_res
        assert str(my) == str(py)
        assert set(py) == set(my)
        assert len(py) == len(my)
        assert set(my.items()) == set(py.items())
def test_no_new_methods_was_added_to_api():
    def is_public(name: str) -> bool:
        return not name.startswith("_")

    dict_public_names = {name for name in dir({}) if is_public(name)}
    hash_public_names = {name for name in dir(HashMap()) if is_public(name)}
assert dict_public_names > hash_public_names
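
# Added illustration: replaying one of the operation sequences above against a
# plain dict, using the same (fun, *args) tuples built by _set/_get/_del.
def _demo_replay():
    demo = {}
    for fun, *args in _add_items:
        _run_operation(demo, fun, *args)
    assert demo == {"key_a": "val_a", "key_b": "val_b"}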
| 55 |
'''simple docstring'''
from .data_collator import (
DataCollatorForLanguageModeling,
DataCollatorForPermutationLanguageModeling,
    DataCollatorForSeq2Seq,
DataCollatorForSOP,
DataCollatorForTokenClassification,
DataCollatorForWholeWordMask,
DataCollatorWithPadding,
DefaultDataCollator,
default_data_collator,
)
from .metrics import glue_compute_metrics, xnli_compute_metrics
from .processors import (
DataProcessor,
InputExample,
InputFeatures,
SingleSentenceClassificationProcessor,
SquadExample,
SquadFeatures,
    SquadV1Processor,
    SquadV2Processor,
glue_convert_examples_to_features,
glue_output_modes,
glue_processors,
glue_tasks_num_labels,
squad_convert_examples_to_features,
xnli_output_modes,
xnli_processors,
xnli_tasks_num_labels,
)
| 55 | 1 |
def rank_of_matrix(matrix: list[list[int | float]]) -> int:
    """Compute the rank of `matrix` by Gaussian elimination (modifies it in place)."""
    rows = len(matrix)
    columns = len(matrix[0])
    rank = min(rows, columns)
    row = 0
    while row < rank:
        # Check if diagonal element is not zero
        if matrix[row][row] != 0:
            # Eliminate all the elements below the diagonal
            for col in range(row + 1, rows):
                multiplier = matrix[col][row] / matrix[row][row]
                for i in range(row, columns):
                    matrix[col][i] -= multiplier * matrix[row][i]
        else:
            # Find a non-zero diagonal element to swap rows
            reduce = True
            for i in range(row + 1, rows):
                if matrix[i][row] != 0:
                    matrix[row], matrix[i] = matrix[i], matrix[row]
                    reduce = False
                    break
            if reduce:
                rank -= 1
                for i in range(rows):
                    matrix[i][row] = matrix[i][rank]
                # Reduce the row pointer by one to stay on the same row
                row -= 1
        row += 1
    return rank
if __name__ == "__main__":
import doctest
doctest.testmod()
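    # Added check: a matrix whose second row is a multiple of the first has rank 1.
    assert rank_of_matrix([[1.0, 2.0], [2.0, 4.0]]) == 1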
| 214 |
def bin_to_octal(bin_string: str) -> str:
    if not all(char in "01" for char in bin_string):
        raise ValueError("Non-binary value was passed to the function")
    if not bin_string:
        raise ValueError("Empty string was passed to the function")
    oct_string = ""
    while len(bin_string) % 3 != 0:
        bin_string = "0" + bin_string
    bin_string_in_3_list = [
        bin_string[index : index + 3]
        for index in range(len(bin_string))
        if index % 3 == 0
    ]
    for bin_group in bin_string_in_3_list:
        oct_val = 0
        for index, val in enumerate(bin_group):
            oct_val += int(2 ** (2 - index) * int(val))
        oct_string += str(oct_val)
    return oct_string
if __name__ == "__main__":
from doctest import testmod
testmod()
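    # Added worked example: 0b11010 == 26 == 0o32.
    assert bin_to_octal("11010") == "32"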
| 214 | 1 |
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate
# and perform gradient accumulation
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
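# Conceptual sketch of gradient accumulation (an illustration of the idea, not
# accelerate's implementation): losses from several micro-batches are scaled
# and back-propagated so their gradients sum, and the optimizer steps once per
# `accumulation_steps` micro-batches, emulating a larger batch.
def _accumulate_and_step(losses, optimizer, accumulation_steps):
    for step, loss in enumerate(losses):
        (loss / accumulation_steps).backward()  # scale so the summed grads average
        if (step + 1) % accumulation_steps == 0:
            optimizer.step()
            optimizer.zero_grad()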
A__ = 16
A__ = 32
def _lowerCAmelCase ( __lowerCAmelCase , __lowerCAmelCase = 16 ) -> int:
"""simple docstring"""
snake_case__ : Any = AutoTokenizer.from_pretrained('''bert-base-cased''' )
snake_case__ : List[Any] = load_dataset('''glue''' , '''mrpc''' )
def tokenize_function(__lowerCAmelCase ):
# max_length=None => use the model max length (it's actually the default)
snake_case__ : Optional[Any] = tokenizer(examples['''sentence1'''] , examples['''sentence2'''] , truncation=__lowerCAmelCase , max_length=__lowerCAmelCase )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
snake_case__ : Optional[int] = datasets.map(
__lowerCAmelCase , batched=__lowerCAmelCase , remove_columns=['''idx''', '''sentence1''', '''sentence2'''] , )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
snake_case__ : Optional[Any] = tokenized_datasets.rename_column('''label''' , '''labels''' )
def collate_fn(__lowerCAmelCase ):
# On TPU it's best to pad everything to the same length or training will be very slow.
snake_case__ : List[Any] = 128 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
snake_case__ : Optional[int] = 16
elif accelerator.mixed_precision != "no":
snake_case__ : int = 8
else:
snake_case__ : List[str] = None
return tokenizer.pad(
__lowerCAmelCase , padding='''longest''' , max_length=__lowerCAmelCase , pad_to_multiple_of=__lowerCAmelCase , return_tensors='''pt''' , )
# Instantiate dataloaders.
snake_case__ : Dict = DataLoader(
tokenized_datasets['''train'''] , shuffle=__lowerCAmelCase , collate_fn=__lowerCAmelCase , batch_size=__lowerCAmelCase )
snake_case__ : Tuple = DataLoader(
tokenized_datasets['''validation'''] , shuffle=__lowerCAmelCase , collate_fn=__lowerCAmelCase , batch_size=__lowerCAmelCase )
return train_dataloader, eval_dataloader
# For testing only
if os.environ.get('''TESTING_MOCKED_DATALOADERS''', None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
A__ = mocked_dataloaders # noqa: F811
def _lowerCAmelCase ( __lowerCAmelCase , __lowerCAmelCase ) -> Tuple:
"""simple docstring"""
if os.environ.get('''TESTING_MOCKED_DATALOADERS''' , __lowerCAmelCase ) == "1":
snake_case__ : Union[str, Any] = 2
# New Code #
snake_case__ : Tuple = int(args.gradient_accumulation_steps )
# Initialize accelerator
snake_case__ : Optional[int] = Accelerator(
cpu=args.cpu , mixed_precision=args.mixed_precision , gradient_accumulation_steps=__lowerCAmelCase )
if accelerator.distributed_type == DistributedType.TPU and gradient_accumulation_steps > 1:
raise NotImplementedError(
'''Gradient accumulation on TPUs is currently not supported. Pass `gradient_accumulation_steps=1`''' )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
snake_case__ : Optional[int] = config['''lr''']
snake_case__ : Optional[int] = int(config['''num_epochs'''] )
snake_case__ : Tuple = int(config['''seed'''] )
snake_case__ : Optional[int] = int(config['''batch_size'''] )
snake_case__ : List[str] = evaluate.load('''glue''' , '''mrpc''' )
set_seed(__lowerCAmelCase )
snake_case__ , snake_case__ : str = get_dataloaders(__lowerCAmelCase , __lowerCAmelCase )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
snake_case__ : Optional[Any] = AutoModelForSequenceClassification.from_pretrained('''bert-base-cased''' , return_dict=__lowerCAmelCase )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
snake_case__ : List[Any] = model.to(accelerator.device )
# Instantiate optimizer
snake_case__ : int = AdamW(params=model.parameters() , lr=__lowerCAmelCase )
# Instantiate scheduler
snake_case__ : List[Any] = get_linear_schedule_with_warmup(
optimizer=__lowerCAmelCase , num_warmup_steps=100 , num_training_steps=(len(__lowerCAmelCase ) * num_epochs) , )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ : Dict = accelerator.prepare(
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
# Now we train the model
for epoch in range(__lowerCAmelCase ):
model.train()
for step, batch in enumerate(__lowerCAmelCase ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
# New code #
# We use the new `accumulate` context manager to perform gradient accumulation
# We also currently do not support TPUs nor advise it as bugs were found on the XLA side when running our tests.
with accelerator.accumulate(__lowerCAmelCase ):
snake_case__ : Optional[int] = model(**__lowerCAmelCase )
snake_case__ : Union[str, Any] = output.loss
accelerator.backward(__lowerCAmelCase )
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
model.eval()
for step, batch in enumerate(__lowerCAmelCase ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
snake_case__ : Tuple = model(**__lowerCAmelCase )
snake_case__ : Union[str, Any] = outputs.logits.argmax(dim=-1 )
snake_case__ , snake_case__ : Any = accelerator.gather_for_metrics((predictions, batch['''labels''']) )
metric.add_batch(
predictions=__lowerCAmelCase , references=__lowerCAmelCase , )
snake_case__ : str = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(f"""epoch {epoch}:""" , __lowerCAmelCase )
def _lowerCAmelCase ( ) -> Tuple:
"""simple docstring"""
snake_case__ : Any = argparse.ArgumentParser(description='''Simple example of training script.''' )
parser.add_argument(
'''--mixed_precision''' , type=__lowerCAmelCase , default=__lowerCAmelCase , choices=['''no''', '''fp16''', '''bf16''', '''fp8'''] , help='''Whether to use mixed precision. Choose'''
'''between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.'''
'''and an Nvidia Ampere GPU.''' , )
# New Code #
parser.add_argument(
'''--gradient_accumulation_steps''' , type=__lowerCAmelCase , default=1 , help='''The number of minibatches to be ran before gradients are accumulated.''' , )
parser.add_argument('''--cpu''' , action='''store_true''' , help='''If passed, will train on the CPU.''' )
snake_case__ : List[str] = parser.parse_args()
snake_case__ : Tuple = {'''lr''': 2E-5, '''num_epochs''': 3, '''seed''': 42, '''batch_size''': 16}
training_function(__lowerCAmelCase , __lowerCAmelCase )
if __name__ == "__main__":
main()
| 363 |
from __future__ import annotations
import random
# Maximum size of the population. Bigger could be faster but is more memory expensive.
N_POPULATION = 200
# Number of elements selected in every generation of evolution. The selection takes
# place from best to worst of that generation and must be smaller than N_POPULATION.
N_SELECTED = 50
# Probability that an element of a generation can mutate, changing one of its genes.
# This will guarantee that all genes will be used during evolution.
MUTATION_PROBABILITY = 0.4
# Just a seed to improve randomness required by the algorithm.
random.seed(random.randint(0, 1000))
def evaluate(item: str, main_target: str) -> tuple[str, float]:
    """Score an item by how many characters match the target position-wise."""
    score = len([g for position, g in enumerate(item) if g == main_target[position]])
    return (item, float(score))
def crossover(parent_1: str, parent_2: str) -> tuple[str, str]:
    """Slice both parents at a random point and swap the tails."""
    random_slice = random.randint(0, len(parent_1) - 1)
    child_1 = parent_1[:random_slice] + parent_2[random_slice:]
    child_2 = parent_2[:random_slice] + parent_1[random_slice:]
    return (child_1, child_2)
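
# Added worked example of the slice-and-swap above, with a fixed cut point
# instead of a random one: cutting "abcd"/"wxyz" at index 2 yields
# ("ab" + "yz", "wx" + "cd").
def _crossover_at(parent_1: str, parent_2: str, cut: int) -> tuple[str, str]:
    return (parent_1[:cut] + parent_2[cut:], parent_2[:cut] + parent_1[cut:])


assert _crossover_at("abcd", "wxyz", 2) == ("abyz", "wxcd")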
def mutate(child: str, genes: list[str]) -> str:
    """With probability MUTATION_PROBABILITY, replace one random gene."""
    child_list = list(child)
    if random.uniform(0, 1) < MUTATION_PROBABILITY:
        child_list[random.randint(0, len(child)) - 1] = random.choice(genes)
    return "".join(child_list)
def select(
    parent_1: tuple[str, float],
    population_score: list[tuple[str, float]],
    genes: list[str],
) -> list[str]:
    pop = []
    # Generate more children proportionally to the fitness score.
    child_n = int(parent_1[1] * 100) + 1
    child_n = 10 if child_n >= 10 else child_n
    for _ in range(child_n):
        parent_2 = population_score[random.randint(0, 10)][0]
        child_1, child_2 = crossover(parent_1[0], parent_2)
        # Append new string to the population list.
        pop.append(mutate(child_1, genes))
        pop.append(mutate(child_2, genes))
    return pop
def basic(target: str, genes: list[str], debug: bool = True) -> tuple[int, int, str]:
    if N_POPULATION < N_SELECTED:
        msg = f"{N_POPULATION} must be bigger than {N_SELECTED}"
        raise ValueError(msg)
    # Verify that the target contains no genes besides the ones inside genes variable.
    not_in_genes_list = sorted({c for c in target if c not in genes})
    if not_in_genes_list:
        msg = f"{not_in_genes_list} is not in genes list, evolution cannot converge"
        raise ValueError(msg)

    # Generate random starting population.
    population = []
    for _ in range(N_POPULATION):
        population.append("".join([random.choice(genes) for i in range(len(target))]))

    # Just some logs to know what the algorithms is doing.
    generation, total_population = 0, 0

    # This loop will end when we find a perfect match for our target.
    while True:
        generation += 1
        total_population += len(population)

        # Random population created. Now it's time to evaluate.
        # Adding a bit of concurrency can make everything faster,
        #
        # import concurrent.futures
        # population_score: list[tuple[str, float]] = []
        # with concurrent.futures.ThreadPoolExecutor(
        #     max_workers=NUM_WORKERS) as executor:
        #     futures = {executor.submit(evaluate, item) for item in population}
        #     concurrent.futures.wait(futures)
        #     population_score = [item.result() for item in futures]
        #
        # but with a simple algorithm like this, it will probably be slower.
        # We just need to call evaluate for every item inside the population.
        population_score = [evaluate(item, target) for item in population]

        # Check if there is a matching evolution.
        population_score = sorted(population_score, key=lambda x: x[1], reverse=True)
        if population_score[0][0] == target:
            return (generation, total_population, population_score[0][0])

        # Print the best result every 10 generation.
        # Just to know that the algorithm is working.
        if debug and generation % 10 == 0:
            print(
                f"\nGeneration: {generation}"
                f"\nTotal Population:{total_population}"
                f"\nBest score: {population_score[0][1]}"
                f"\nBest string: {population_score[0][0]}"
            )

        # Flush the old population, keeping some of the best evolutions.
        # Keeping this avoid regression of evolution.
        population_best = population[: int(N_POPULATION / 3)]
        population.clear()
        population.extend(population_best)

        # Normalize population score to be between 0 and 1.
        population_score = [
            (item, score / len(target)) for item, score in population_score
        ]

        # This is selection
        for i in range(N_SELECTED):
            population.extend(select(population_score[int(i)], population_score, genes))
            # Check if the population has already reached the maximum value and if so,
            # break the cycle. If this check is disabled, the algorithm will take
            # forever to compute large strings, but will also calculate small strings in
            # a far fewer generations.
            if len(population) > N_POPULATION:
                break
if __name__ == "__main__":
    target_str = (
        "This is a genetic algorithm to evaluate, combine, evolve, and mutate a string!"
    )
    genes_list = list(
        " ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklm"
        "nopqrstuvwxyz.,;!?+-*#@^'èéòà€ù=)(&%$£/\\"
    )
    generation, population, target = basic(target_str, genes_list)
    print(
        f"\nGeneration: {generation}\nTotal Population: {population}\nTarget: {target}"
    )
| 44 | 0 |
import unittest
from pathlib import Path
from tempfile import NamedTemporaryFile, TemporaryDirectory
from transformers import BertConfig, BertTokenizerFast, FeatureExtractionPipeline
from transformers.convert_graph_to_onnx import (
convert,
ensure_valid_input,
generate_identified_filename,
infer_shapes,
quantize,
)
from transformers.testing_utils import require_tf, require_tokenizers, require_torch, slow
class __lowerCAmelCase :
def _lowercase ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> List[str]:
'''simple docstring'''
return None
class __lowerCAmelCase :
def _lowercase ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> Optional[Any]:
'''simple docstring'''
return None
class __lowerCAmelCase ( unittest.TestCase):
_lowercase : Any = [
# (model_name, model_kwargs)
("""bert-base-cased""", {}),
("""gpt2""", {"""use_cache""": False}), # We don't support exporting GPT2 past keys anymore
]
@require_tf
@slow
def _lowercase ( self ) -> Dict:
'''simple docstring'''
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
self._test_export(lowerCAmelCase__ , "tf" , 1_2 , **lowerCAmelCase__ )
@require_torch
@slow
def _lowercase ( self ) -> str:
'''simple docstring'''
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
self._test_export(lowerCAmelCase__ , "pt" , 1_2 , **lowerCAmelCase__ )
@require_torch
@slow
def _lowercase ( self ) -> List[Any]:
'''simple docstring'''
from transformers import BertModel
a__ : Union[str, Any] =["[UNK]", "[SEP]", "[CLS]", "[PAD]", "[MASK]", "some", "other", "words"]
with NamedTemporaryFile(mode="w+t" ) as vocab_file:
vocab_file.write("\n".join(lowerCAmelCase__ ) )
vocab_file.flush()
a__ : Union[str, Any] =BertTokenizerFast(vocab_file.name )
with TemporaryDirectory() as bert_save_dir:
a__ : Any =BertModel(BertConfig(vocab_size=len(lowerCAmelCase__ ) ) )
model.save_pretrained(lowerCAmelCase__ )
self._test_export(lowerCAmelCase__ , "pt" , 1_2 , lowerCAmelCase__ )
@require_tf
@slow
def _lowercase ( self ) -> Optional[int]:
'''simple docstring'''
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
a__ : Optional[int] =self._test_export(lowerCAmelCase__ , "tf" , 1_2 , **lowerCAmelCase__ )
a__ : List[str] =quantize(Path(lowerCAmelCase__ ) )
# Ensure the actual quantized model is not bigger than the original one
if quantized_path.stat().st_size >= Path(lowerCAmelCase__ ).stat().st_size:
self.fail("Quantized model is bigger than initial ONNX model" )
@require_torch
@slow
def _lowercase ( self ) -> Any:
'''simple docstring'''
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
a__ : List[str] =self._test_export(lowerCAmelCase__ , "pt" , 1_2 , **lowerCAmelCase__ )
a__ : List[Any] =quantize(lowerCAmelCase__ )
# Ensure the actual quantized model is not bigger than the original one
if quantized_path.stat().st_size >= Path(lowerCAmelCase__ ).stat().st_size:
self.fail("Quantized model is bigger than initial ONNX model" )
def _lowercase ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__=None , **lowerCAmelCase__ ) -> str:
'''simple docstring'''
try:
# Compute path
with TemporaryDirectory() as tempdir:
a__ : Any =Path(lowerCAmelCase__ ).joinpath("model.onnx" )
# Remove folder if exists
if path.parent.exists():
path.parent.rmdir()
# Export
convert(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , **lowerCAmelCase__ )
return path
except Exception as e:
self.fail(lowerCAmelCase__ )
@require_torch
@require_tokenizers
@slow
def _lowercase ( self ) -> int:
'''simple docstring'''
from transformers import BertModel
a__ : List[Any] =BertModel(BertConfig.from_pretrained("lysandre/tiny-bert-random" ) )
a__ : int =BertTokenizerFast.from_pretrained("lysandre/tiny-bert-random" )
self._test_infer_dynamic_axis(lowerCAmelCase__ , lowerCAmelCase__ , "pt" )
@require_tf
@require_tokenizers
@slow
def _lowercase ( self ) -> Optional[Any]:
'''simple docstring'''
from transformers import TFBertModel
a__ : Any =TFBertModel(BertConfig.from_pretrained("lysandre/tiny-bert-random" ) )
a__ : str =BertTokenizerFast.from_pretrained("lysandre/tiny-bert-random" )
self._test_infer_dynamic_axis(lowerCAmelCase__ , lowerCAmelCase__ , "tf" )
def _lowercase ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> Tuple:
'''simple docstring'''
a__ : int =FeatureExtractionPipeline(lowerCAmelCase__ , lowerCAmelCase__ )
a__ : int =["input_ids", "token_type_ids", "attention_mask", "output_0", "output_1"]
a__ , a__ , a__ , a__ : str =infer_shapes(lowerCAmelCase__ , lowerCAmelCase__ )
# Assert all variables are present
self.assertEqual(len(lowerCAmelCase__ ) , len(lowerCAmelCase__ ) )
self.assertTrue(all(var_name in shapes for var_name in variable_names ) )
self.assertSequenceEqual(variable_names[:3] , lowerCAmelCase__ )
self.assertSequenceEqual(variable_names[3:] , lowerCAmelCase__ )
# Assert inputs are {0: batch, 1: sequence}
for var_name in ["input_ids", "token_type_ids", "attention_mask"]:
self.assertDictEqual(shapes[var_name] , {0: "batch", 1: "sequence"} )
# Assert outputs are {0: batch, 1: sequence} and {0: batch}
self.assertDictEqual(shapes["output_0"] , {0: "batch", 1: "sequence"} )
self.assertDictEqual(shapes["output_1"] , {0: "batch"} )
def _lowercase ( self ) -> Dict:
'''simple docstring'''
a__ : int =["input_ids", "attention_mask", "token_type_ids"]
a__ : Any ={"input_ids": [1, 2, 3, 4], "attention_mask": [0, 0, 0, 0], "token_type_ids": [1, 1, 1, 1]}
a__ , a__ : Tuple =ensure_valid_input(FuncContiguousArgs() , lowerCAmelCase__ , lowerCAmelCase__ )
# Should have exactly the same number of args (all are valid)
self.assertEqual(len(lowerCAmelCase__ ) , 3 )
# Should have exactly the same input names
self.assertEqual(set(lowerCAmelCase__ ) , set(lowerCAmelCase__ ) )
# Parameter should be reordered according to their respective place in the function:
# (input_ids, token_type_ids, attention_mask)
self.assertEqual(lowerCAmelCase__ , (tokens["input_ids"], tokens["token_type_ids"], tokens["attention_mask"]) )
# Generated args are interleaved with another args (for instance parameter "past" in GPT2)
a__ , a__ : Any =ensure_valid_input(FuncNonContiguousArgs() , lowerCAmelCase__ , lowerCAmelCase__ )
# Should have exactly the one arg (all before the one not provided "some_other_args")
self.assertEqual(len(lowerCAmelCase__ ) , 1 )
self.assertEqual(len(lowerCAmelCase__ ) , 1 )
# Should have only "input_ids"
self.assertEqual(inputs_args[0] , tokens["input_ids"] )
self.assertEqual(ordered_input_names[0] , "input_ids" )
def _lowercase ( self ) -> Tuple:
'''simple docstring'''
a__ : List[Any] =generate_identified_filename(Path("/home/something/my_fake_model.onnx" ) , "-test" )
self.assertEqual("/home/something/my_fake_model-test.onnx" , generated.as_posix() )
| 95 |
import os
def largest_product(grid):
    n_columns = len(grid[0])
    n_rows = len(grid)
    largest = 0
    lr_diag_product = 0
    rl_diag_product = 0
    # Check vertically, horizontally, diagonally at the same time (only works
    # for nxn grid)
    for i in range(n_columns):
        for j in range(n_rows - 3):
            vert_product = grid[j][i] * grid[j + 1][i] * grid[j + 2][i] * grid[j + 3][i]
            horz_product = grid[i][j] * grid[i][j + 1] * grid[i][j + 2] * grid[i][j + 3]
            # Left-to-right diagonal (\) product
            if i < n_columns - 3:
                lr_diag_product = (
                    grid[i][j]
                    * grid[i + 1][j + 1]
                    * grid[i + 2][j + 2]
                    * grid[i + 3][j + 3]
                )
            # Right-to-left diagonal(/) product
            if i > 2:
                rl_diag_product = (
                    grid[i][j]
                    * grid[i - 1][j + 1]
                    * grid[i - 2][j + 2]
                    * grid[i - 3][j + 3]
                )
            max_product = max(
                vert_product, horz_product, lr_diag_product, rl_diag_product
            )
            if max_product > largest:
                largest = max_product
    return largest
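
# Added check on a tiny 4x4 grid: with twos on the main diagonal and ones
# elsewhere, the best product is the 2*2*2*2 diagonal = 16.
assert largest_product([[2, 1, 1, 1], [1, 2, 1, 1], [1, 1, 2, 1], [1, 1, 1, 2]]) == 16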
def solution():
    grid = []
    with open(os.path.dirname(__file__) + "/grid.txt") as file:
        for line in file:
            grid.append(line.strip("\n").split(" "))
    grid = [[int(i) for i in grid[j]] for j in range(len(grid))]
    return largest_product(grid)
if __name__ == "__main__":
print(solution())
| 188 | 0 |
'''simple docstring'''
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel
from diffusers import DDIMScheduler, LDMPipeline, UNetaDModel, VQModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class __lowerCamelCase ( unittest.TestCase ):
"""simple docstring"""
@property
def A ( self : Dict):
torch.manual_seed(0)
_A : int = UNetaDModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=('DownBlock2D', 'AttnDownBlock2D') , up_block_types=('AttnUpBlock2D', 'UpBlock2D') , )
return model
@property
def A ( self : Dict):
torch.manual_seed(0)
_A : Union[str, Any] = VQModel(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=3 , )
return model
@property
def A ( self : Union[str, Any]):
torch.manual_seed(0)
_A : int = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
return CLIPTextModel(_a)
def A ( self : Optional[int]):
_A : List[str] = self.dummy_uncond_unet
_A : int = DDIMScheduler()
_A : List[Any] = self.dummy_vq_model
_A : str = LDMPipeline(unet=_a , vqvae=_a , scheduler=_a)
ldm.to(_a)
ldm.set_progress_bar_config(disable=_a)
_A : List[str] = torch.manual_seed(0)
_A : Any = ldm(generator=_a , num_inference_steps=2 , output_type='numpy').images
_A : Optional[int] = torch.manual_seed(0)
_A : str = ldm(generator=_a , num_inference_steps=2 , output_type='numpy' , return_dict=_a)[0]
_A : Optional[Any] = image[0, -3:, -3:, -1]
_A : List[Any] = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
_A : Optional[int] = np.array([0.8512, 0.818, 0.6411, 0.6808, 0.4465, 0.5618, 0.46, 0.6231, 0.5172])
_A : Union[str, Any] = 1e-2 if torch_device != 'mps' else 3e-2
assert np.abs(image_slice.flatten() - expected_slice).max() < tolerance
assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < tolerance
@slow
@require_torch
class __lowerCamelCase ( unittest.TestCase ):
"""simple docstring"""
def A ( self : Optional[int]):
_A : str = LDMPipeline.from_pretrained('CompVis/ldm-celebahq-256')
ldm.to(_a)
ldm.set_progress_bar_config(disable=_a)
_A : int = torch.manual_seed(0)
_A : Any = ldm(generator=_a , num_inference_steps=5 , output_type='numpy').images
_A : int = image[0, -3:, -3:, -1]
assert image.shape == (1, 256, 256, 3)
_A : Union[str, Any] = np.array([0.4399, 0.4_4975, 0.4_6825, 0.474, 0.4359, 0.4581, 0.4_5095, 0.4341, 0.4447])
_A : Optional[int] = 1e-2 if torch_device != 'mps' else 3e-2
assert np.abs(image_slice.flatten() - expected_slice).max() < tolerance
| 363 |
'''simple docstring'''
import unittest
import numpy as np
import torch
from torch import nn
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModelWithProjection,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import KandinskyVaaPriorPipeline, PriorTransformer, UnCLIPScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import enable_full_determinism, skip_mps
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class __lowerCamelCase ( a_ , unittest.TestCase ):
"""simple docstring"""
a = KandinskyVaaPriorPipeline
a = ["prompt"]
a = ["prompt", "negative_prompt"]
a = [
"num_images_per_prompt",
"generator",
"num_inference_steps",
"latents",
"negative_prompt",
"guidance_scale",
"output_type",
"return_dict",
]
a = False
@property
def A ( self : List[str]):
return 32
@property
def A ( self : List[Any]):
return 32
@property
def A ( self : Dict):
return self.time_input_dim
@property
def A ( self : Tuple):
return self.time_input_dim * 4
@property
def A ( self : Optional[int]):
return 100
@property
def A ( self : Dict):
_A : Tuple = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip')
return tokenizer
@property
def A ( self : Optional[Any]):
torch.manual_seed(0)
_A : Any = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
return CLIPTextModelWithProjection(SCREAMING_SNAKE_CASE)
@property
def A ( self : List[Any]):
torch.manual_seed(0)
_A : Optional[Any] = {
'num_attention_heads': 2,
'attention_head_dim': 12,
'embedding_dim': self.text_embedder_hidden_size,
'num_layers': 1,
}
_A : Any = PriorTransformer(**SCREAMING_SNAKE_CASE)
# clip_std and clip_mean is initialized to be 0 so PriorTransformer.post_process_latents will always return 0 - set clip_std to be 1 so it won't return 0
_A : str = nn.Parameter(torch.ones(model.clip_std.shape))
return model
@property
def A ( self : List[str]):
torch.manual_seed(0)
_A : List[Any] = CLIPVisionConfig(
hidden_size=self.text_embedder_hidden_size , image_size=224 , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_channels=3 , num_hidden_layers=5 , patch_size=14 , )
_A : Union[str, Any] = CLIPVisionModelWithProjection(SCREAMING_SNAKE_CASE)
return model
@property
def A ( self : int):
_A : Optional[Any] = CLIPImageProcessor(
crop_size=224 , do_center_crop=SCREAMING_SNAKE_CASE , do_normalize=SCREAMING_SNAKE_CASE , do_resize=SCREAMING_SNAKE_CASE , image_mean=[0.4814_5466, 0.457_8275, 0.4082_1073] , image_std=[0.2686_2954, 0.2613_0258, 0.2757_7711] , resample=3 , size=224 , )
return image_processor
def A ( self : Optional[Any]):
_A : Optional[int] = self.dummy_prior
_A : Dict = self.dummy_image_encoder
_A : Dict = self.dummy_text_encoder
_A : str = self.dummy_tokenizer
_A : Optional[Any] = self.dummy_image_processor
_A : Optional[Any] = UnCLIPScheduler(
variance_type='fixed_small_log' , prediction_type='sample' , num_train_timesteps=1000 , clip_sample=SCREAMING_SNAKE_CASE , clip_sample_range=10.0 , )
_A : Dict = {
'prior': prior,
'image_encoder': image_encoder,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'scheduler': scheduler,
'image_processor': image_processor,
}
return components
def A ( self : Tuple , SCREAMING_SNAKE_CASE : List[Any] , SCREAMING_SNAKE_CASE : List[str]=0):
if str(SCREAMING_SNAKE_CASE).startswith('mps'):
_A : Dict = torch.manual_seed(SCREAMING_SNAKE_CASE)
else:
_A : int = torch.Generator(device=SCREAMING_SNAKE_CASE).manual_seed(SCREAMING_SNAKE_CASE)
_A : List[Any] = {
'prompt': 'horse',
'generator': generator,
'guidance_scale': 4.0,
'num_inference_steps': 2,
'output_type': 'np',
}
return inputs
def A ( self : List[Any]):
_A : str = 'cpu'
_A : Tuple = self.get_dummy_components()
_A : List[Any] = self.pipeline_class(**SCREAMING_SNAKE_CASE)
_A : Any = pipe.to(SCREAMING_SNAKE_CASE)
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE)
_A : Dict = pipe(**self.get_dummy_inputs(SCREAMING_SNAKE_CASE))
_A : str = output.image_embeds
_A : Optional[int] = pipe(
**self.get_dummy_inputs(SCREAMING_SNAKE_CASE) , return_dict=SCREAMING_SNAKE_CASE , )[0]
_A : Optional[int] = image[0, -10:]
_A : int = image_from_tuple[0, -10:]
assert image.shape == (1, 32)
_A : Dict = np.array(
[-0.0532, 1.7120, 0.3656, -1.0852, -0.8946, -1.1756, 0.4348, 0.2482, 0.5146, -0.1156])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
@skip_mps
def A ( self : Any):
_A : Tuple = torch_device == 'cpu'
_A : Optional[int] = True
_A : Tuple = False
self._test_inference_batch_single_identical(
test_max_difference=SCREAMING_SNAKE_CASE , relax_max_difference=SCREAMING_SNAKE_CASE , test_mean_pixel_difference=SCREAMING_SNAKE_CASE , )
@skip_mps
def A ( self : int):
_A : Tuple = torch_device == 'cpu'
_A : Optional[Any] = False
self._test_attention_slicing_forward_pass(
test_max_difference=SCREAMING_SNAKE_CASE , test_mean_pixel_difference=SCREAMING_SNAKE_CASE , )
| 227 | 0 |
'''simple docstring'''
from diffusers.utils.testing_utils import require_onnxruntime
@require_onnxruntime
class a :
pass
| 168 |
'''simple docstring'''
from __future__ import annotations
from collections.abc import Callable
from typing import Any, Generic, TypeVar
a_ : Any = TypeVar("T")
class SegmentTree(Generic[T]):
    def __init__(self, arr: list[T], fnc: Callable[[T, T], T]) -> None:
        any_type: Any | T = None
        self.N: int = len(arr)
        self.st: list[T] = [any_type for _ in range(self.N)] + arr
        self.fn = fnc
        self.build()

    def build(self) -> None:
        for p in range(self.N - 1, 0, -1):
            self.st[p] = self.fn(self.st[p * 2], self.st[p * 2 + 1])

    def update(self, p: int, v: T) -> None:
        p += self.N
        self.st[p] = v
        while p > 1:
            p = p // 2
            self.st[p] = self.fn(self.st[p * 2], self.st[p * 2 + 1])

    def query(self, l: int, r: int) -> T | None:  # noqa: E741
        l, r = l + self.N, r + self.N
        res: T | None = None
        while l <= r:
            if l % 2 == 1:
                res = self.st[l] if res is None else self.fn(res, self.st[l])
            if r % 2 == 0:
                res = self.st[r] if res is None else self.fn(res, self.st[r])
            l, r = (l + 1) // 2, (r - 1) // 2
        return res
if __name__ == "__main__":
from functools import reduce
    test_array = [1, 10, -2, 9, -3, 8, 4, -7, 5, 6, 11, -12]
    test_updates = {
        0: 7,
        1: 2,
        2: 6,
        3: -14,
        4: 5,
        5: 4,
        6: 7,
        7: -10,
        8: 9,
        9: 10,
        10: 12,
        11: 1,
    }
    min_segment_tree = SegmentTree(test_array, min)
    max_segment_tree = SegmentTree(test_array, max)
    sum_segment_tree = SegmentTree(test_array, lambda a, b: a + b)
    def test_all_segments() -> None:
        """Test all possible segments against a reduce() reference."""
        for i in range(len(test_array)):
            for j in range(i, len(test_array)):
                min_range = reduce(min, test_array[i : j + 1])
                max_range = reduce(max, test_array[i : j + 1])
                sum_range = reduce(lambda a, b: a + b, test_array[i : j + 1])
                assert min_range == min_segment_tree.query(i, j)
                assert max_range == max_segment_tree.query(i, j)
                assert sum_range == sum_segment_tree.query(i, j)
test_all_segments()
for index, value in test_updates.items():
        test_array[index] = value
min_segment_tree.update(index, value)
max_segment_tree.update(index, value)
sum_segment_tree.update(index, value)
test_all_segments()
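    # Added usage sketch: the iterative tree answers range queries in O(log n)
    # by climbing from the leaves; min over the slice [1, 5, 3] is 1.
    demo_tree = SegmentTree([2, 1, 5, 3], min)
    assert demo_tree.query(1, 3) == 1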
| 168 | 1 |
# Copyright (c) 2021-, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
####################################################################################################
#
# Note: If when running this conversion script you're getting an exception:
# ModuleNotFoundError: No module named 'megatron.model.enums'
# you need to tell python where to find the clone of Megatron-LM, e.g.:
#
# cd /tmp
# git clone https://github.com/NVIDIA/Megatron-LM
# PYTHONPATH=/tmp/Megatron-LM python src/transformers/models/megatron_gpt2/convert_megatron_gpt2_checkpoint.py ...
#
# if you already have it cloned elsewhere, simply adjust the path to the existing path
#
# If the training was done using a Megatron-LM fork, e.g.,
# https://github.com/microsoft/Megatron-DeepSpeed/ then chances are that you need to have that one
# in your path, i.e., /path/to/Megatron-DeepSpeed/
#
import argparse
import os
import re
import zipfile
import torch
from transformers import AutoTokenizer, GPTaConfig
def recursive_print(name, val, spaces=0):
    # Format the message.
    if name is None:
        msg = None
    else:
        fmt = "." * max(0, spaces - 2) + "# {:" + str(50 - spaces) + "s}"
        msg = fmt.format(name)

    # Print and recurse (if needed).
    if isinstance(val, dict):
        if msg is not None:
            print(msg)
        for k in val.keys():
            recursive_print(k, val[k], spaces + 2)
    elif isinstance(val, torch.Tensor):
        print(msg, ":", val.size())
    else:
        print(msg, ":", val)
def fix_query_key_value_ordering(param, checkpoint_version, num_splits, num_heads, hidden_size):
    # Permutes layout of param tensor to [num_splits * num_heads * hidden_size, :]
    # for compatibility with later versions of NVIDIA Megatron-LM.
    # The inverse operation is performed inside Megatron-LM to read checkpoints:
    # https://github.com/NVIDIA/Megatron-LM/blob/v2.4/megatron/checkpointing.py#L209
    # If param is the weight tensor of the self-attention block, the returned tensor
    # will have to be transposed one more time to be read by HuggingFace GPT2.
    input_shape = param.size()
    if checkpoint_version == 1.0:
        # version 1.0 stores [num_heads * hidden_size * num_splits, :]
        saved_shape = (num_heads, hidden_size, num_splits) + input_shape[1:]
        param = param.view(*saved_shape)
        param = param.transpose(0, 2)
        param = param.transpose(1, 2).contiguous()
    elif checkpoint_version >= 2.0:
        # other versions store [num_heads * num_splits * hidden_size, :]
        saved_shape = (num_heads, num_splits, hidden_size) + input_shape[1:]
        param = param.view(*saved_shape)
        param = param.transpose(0, 1).contiguous()
    param = param.view(*input_shape)
    return param
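def _example_fix_query_key_value_ordering():
    # Hedged sketch (toy sizes, not part of the original script): shows that the
    # re-ordering keeps the flattened 2-D shape while permuting the heads/splits layout.
    num_heads, hidden_size, num_splits, cols = 2, 4, 3, 5
    param = torch.randn(num_heads * num_splits * hidden_size, cols)
    out = fix_query_key_value_ordering(param, 2.0, num_splits, num_heads, hidden_size)
    assert out.shape == param.shape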
def convert_megatron_checkpoint(args, input_state_dict, config):
# The converted output model.
UpperCamelCase_ = {}
# old versions did not store training args
UpperCamelCase_ = input_state_dict.get("args" , UpperCamelCase_ )
if ds_args is not None:
# do not make the user write a config file when the exact dimensions/sizes are already in the checkpoint
# from pprint import pprint
# pprint(vars(ds_args))
UpperCamelCase_ = ds_args.padded_vocab_size
UpperCamelCase_ = ds_args.max_position_embeddings
UpperCamelCase_ = ds_args.hidden_size
UpperCamelCase_ = ds_args.num_layers
UpperCamelCase_ = ds_args.num_attention_heads
UpperCamelCase_ = ds_args.ffn_hidden_size
# pprint(config)
# The number of heads.
UpperCamelCase_ = config.n_head
# The hidden_size per head.
UpperCamelCase_ = config.n_embd // config.n_head
# Megatron-LM checkpoint version
if "checkpoint_version" in input_state_dict.keys():
UpperCamelCase_ = input_state_dict["checkpoint_version"]
else:
UpperCamelCase_ = 0.0
# The model.
UpperCamelCase_ = input_state_dict["model"]
# The language model.
UpperCamelCase_ = model["language_model"]
# The embeddings.
UpperCamelCase_ = lm["embedding"]
# The word embeddings.
UpperCamelCase_ = embeddings["word_embeddings"]["weight"]
# Truncate the embedding table to vocab_size rows.
UpperCamelCase_ = word_embeddings[: config.vocab_size, :]
UpperCamelCase_ = word_embeddings
# The position embeddings.
UpperCamelCase_ = embeddings["position_embeddings"]["weight"]
# Read the causal mask dimension (seqlen). [max_sequence_length, hidden_size]
UpperCamelCase_ = pos_embeddings.size(0 )
if n_positions != config.n_positions:
raise ValueError(
F'''pos_embeddings.max_sequence_length={n_positions} and config.n_positions={config.n_positions} don\'t match''' )
# Store the position embeddings.
UpperCamelCase_ = pos_embeddings
# The transformer.
UpperCamelCase_ = lm["transformer"] if "transformer" in lm.keys() else lm["encoder"]
# The regex to extract layer names.
UpperCamelCase_ = re.compile(r"layers\.(\d+)\.([a-z0-9_.]+)\.([a-z]+)" )
# The simple map of names for "automated" rules.
UpperCamelCase_ = {
"attention.dense": ".attn.c_proj.",
"self_attention.dense": ".attn.c_proj.",
"mlp.dense_h_to_4h": ".mlp.c_fc.",
"mlp.dense_4h_to_h": ".mlp.c_proj.",
}
# Extract the layers.
for key, val in transformer.items():
# Match the name.
UpperCamelCase_ = layer_re.match(UpperCamelCase_ )
# Stop if that's not a layer
if m is None:
break
# The index of the layer.
UpperCamelCase_ = int(m.group(1 ) )
# The name of the operation.
UpperCamelCase_ = m.group(2 )
# Is it a weight or a bias?
UpperCamelCase_ = m.group(3 )
# The name of the layer.
UpperCamelCase_ = F'''transformer.h.{layer_idx}'''
# For layernorm(s), simply store the layer norm.
if op_name.endswith("layernorm" ):
UpperCamelCase_ = "ln_1" if op_name.startswith("input" ) else "ln_2"
UpperCamelCase_ = val
# Transpose the QKV matrix.
elif (
op_name == "attention.query_key_value" or op_name == "self_attention.query_key_value"
) and weight_or_bias == "weight":
# Insert a tensor of 1x1xDxD bias.
            UpperCamelCase_ = torch.tril(torch.ones((n_positions, n_positions) , dtype=torch.float16 ) ).view(
                1 , 1 , n_positions , n_positions )
UpperCamelCase_ = causal_mask
# Insert a "dummy" tensor for masked_bias.
            UpperCamelCase_ = torch.tensor(-1e4 , dtype=torch.float16 )
UpperCamelCase_ = masked_bias
UpperCamelCase_ = fix_query_key_value_ordering(UpperCamelCase_ , UpperCamelCase_ , 3 , UpperCamelCase_ , UpperCamelCase_ )
# Megatron stores (3*D) x D but transformers-GPT2 expects D x 3*D.
UpperCamelCase_ = out_val.transpose(0 , 1 ).contiguous()
# Store.
UpperCamelCase_ = out_val
# Transpose the bias.
elif (
op_name == "attention.query_key_value" or op_name == "self_attention.query_key_value"
) and weight_or_bias == "bias":
UpperCamelCase_ = fix_query_key_value_ordering(UpperCamelCase_ , UpperCamelCase_ , 3 , UpperCamelCase_ , UpperCamelCase_ )
# Store. No change of shape.
UpperCamelCase_ = out_val
# Transpose the weights.
elif weight_or_bias == "weight":
UpperCamelCase_ = megatron_to_transformers[op_name]
UpperCamelCase_ = val.transpose(0 , 1 )
# Copy the bias.
elif weight_or_bias == "bias":
UpperCamelCase_ = megatron_to_transformers[op_name]
UpperCamelCase_ = val
# DEBUG.
assert config.n_layer == layer_idx + 1
# The final layernorm.
UpperCamelCase_ = transformer["final_layernorm.weight"]
UpperCamelCase_ = transformer["final_layernorm.bias"]
    # For the LM head, transformers wants the LM-head matrix tied to the word embeddings.
UpperCamelCase_ = word_embeddings
# It should be done!
return output_state_dict
def main():
# Create the argument parser.
UpperCamelCase_ = argparse.ArgumentParser()
parser.add_argument("--print-checkpoint-structure" , action="store_true" )
parser.add_argument(
"path_to_checkpoint" , type=UpperCamelCase_ , help="Path to the checkpoint file (.zip archive or direct .pt file)" , )
parser.add_argument(
"--config_file" , default="" , type=UpperCamelCase_ , help="An optional config json file describing the pre-trained model." , )
UpperCamelCase_ = parser.parse_args()
# Extract the basename.
UpperCamelCase_ = os.path.dirname(args.path_to_checkpoint )
# Load the model.
# the .zip is very optional, let's keep it for backward compatibility
print(F'''Extracting PyTorch state dictionary from {args.path_to_checkpoint}''' )
if args.path_to_checkpoint.endswith(".zip" ):
with zipfile.ZipFile(args.path_to_checkpoint , "r" ) as checkpoint:
with checkpoint.open("release/mp_rank_00/model_optim_rng.pt" ) as pytorch_dict:
UpperCamelCase_ = torch.load(UpperCamelCase_ , map_location="cpu" )
else:
UpperCamelCase_ = torch.load(args.path_to_checkpoint , map_location="cpu" )
UpperCamelCase_ = input_state_dict.get("args" , UpperCamelCase_ )
# Read the config, or default to the model released by NVIDIA.
if args.config_file == "":
if ds_args is not None:
if ds_args.bias_gelu_fusion:
UpperCamelCase_ = "gelu_fast"
elif ds_args.openai_gelu:
UpperCamelCase_ = "gelu_new"
else:
UpperCamelCase_ = "gelu"
else:
# in the very early days this used to be "gelu_new"
UpperCamelCase_ = "gelu_new"
# Spell out all parameters in case the defaults change.
        UpperCamelCase_ = GPT2Config(
vocab_size=50257 , n_positions=1024 , n_embd=1024 , n_layer=24 , n_head=16 , n_inner=4096 , activation_function=UpperCamelCase_ , resid_pdrop=0.1 , embd_pdrop=0.1 , attn_pdrop=0.1 , layer_norm_epsilon=1e-5 , initializer_range=0.02 , summary_type="cls_index" , summary_use_proj=UpperCamelCase_ , summary_activation=UpperCamelCase_ , summary_proj_to_labels=UpperCamelCase_ , summary_first_dropout=0.1 , scale_attn_weights=UpperCamelCase_ , use_cache=UpperCamelCase_ , bos_token_id=50256 , eos_token_id=50256 , )
else:
        UpperCamelCase_ = GPT2Config.from_json_file(args.config_file )
UpperCamelCase_ = ["GPT2LMHeadModel"]
# Convert.
print("Converting" )
UpperCamelCase_ = convert_megatron_checkpoint(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
# Print the structure of converted state dict.
if args.print_checkpoint_structure:
recursive_print(UpperCamelCase_ , UpperCamelCase_ )
# Add tokenizer class info to config
# see https://github.com/huggingface/transformers/issues/13906)
if ds_args is not None:
UpperCamelCase_ = ds_args.tokenizer_type
if tokenizer_type == "GPT2BPETokenizer":
UpperCamelCase_ = "gpt2"
elif tokenizer_type == "PretrainedFromHF":
UpperCamelCase_ = ds_args.tokenizer_name_or_path
else:
raise ValueError(F'''Unrecognized tokenizer_type {tokenizer_type}''' )
else:
UpperCamelCase_ = "gpt2"
UpperCamelCase_ = AutoTokenizer.from_pretrained(UpperCamelCase_ )
UpperCamelCase_ = type(UpperCamelCase_ ).__name__
UpperCamelCase_ = tokenizer_class
# Store the config to file.
print("Saving config" )
config.save_pretrained(UpperCamelCase_ )
# Save tokenizer based on args
print(F'''Adding {tokenizer_class} tokenizer files''' )
tokenizer.save_pretrained(UpperCamelCase_ )
# Store the state_dict to file.
UpperCamelCase_ = os.path.join(UpperCamelCase_ , "pytorch_model.bin" )
print(F'''Saving checkpoint to "{output_checkpoint_file}"''' )
torch.save(UpperCamelCase_ , UpperCamelCase_ )
####################################################################################################
if __name__ == "__main__":
    main()
####################################################################################################
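# Usage sketch (placeholder path; the flag is the one registered in main() above):
#   python convert_megatron_gpt2_checkpoint.py --print-checkpoint-structure \
#       /path/to/checkpoint/release/mp_rank_00/model_optim_rng.pt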
| 366 |
def rank_of_matrix(matrix: list) -> int:
    """Find the rank of a matrix via Gaussian elimination (mutates the input)."""
    rows = len(matrix)
    columns = len(matrix[0])
    rank = min(rows, columns)
    row = 0
    while row < rank:
        # Check if diagonal element is not zero
        if matrix[row][row] != 0:
            # Eliminate all the elements below the diagonal
            for col in range(row + 1, rows):
                multiplier = matrix[col][row] / matrix[row][row]
                for i in range(row, columns):
                    matrix[col][i] -= multiplier * matrix[row][i]
            row += 1
        else:
            # Find a non-zero element below the diagonal to swap rows
            reduce = True
            for i in range(row + 1, rows):
                if matrix[i][row] != 0:
                    matrix[row], matrix[i] = matrix[i], matrix[row]
                    reduce = False
                    break
            if reduce:
                # No pivot in this column: drop it and lower the rank
                rank -= 1
                for i in range(rows):
                    matrix[i][row] = matrix[i][rank]
            # In either case stay on the same row and retry with the new pivot
    return rank
if __name__ == "__main__":
    import doctest
    doctest.testmod()
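    # Quick sanity check (illustrative, not from the original file): the second
    # row is twice the first, so the rank collapses to 1.
    print(rank_of_matrix([[1.0, 2.0, 3.0], [2.0, 4.0, 6.0]]))  # expected: 1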
| 328 | 0 |
'''simple docstring'''
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a_ : int = logging.get_logger(__name__)
a_ : Union[str, Any] = {
"""BridgeTower/bridgetower-base""": """https://huggingface.co/BridgeTower/bridgetower-base/blob/main/config.json""",
"""BridgeTower/bridgetower-base-itm-mlm""": (
"""https://huggingface.co/BridgeTower/bridgetower-base-itm-mlm/blob/main/config.json"""
),
}
class snake_case ( lowercase ):
"""simple docstring"""
_lowerCamelCase = "bridgetower_vision_model"
def __init__( self , UpperCamelCase=768 , UpperCamelCase=12 , UpperCamelCase=3 , UpperCamelCase=16 , UpperCamelCase=288 , UpperCamelCase=1 , UpperCamelCase=1e-05 , UpperCamelCase=False , UpperCamelCase=True , UpperCamelCase=False , **UpperCamelCase , ):
"""simple docstring"""
super().__init__(**UpperCamelCase )
lowerCamelCase_ = hidden_size
lowerCamelCase_ = num_hidden_layers
lowerCamelCase_ = num_channels
lowerCamelCase_ = patch_size
lowerCamelCase_ = image_size
lowerCamelCase_ = initializer_factor
lowerCamelCase_ = layer_norm_eps
lowerCamelCase_ = stop_gradient
lowerCamelCase_ = share_layernorm
lowerCamelCase_ = remove_last_layer
@classmethod
def snake_case ( cls , UpperCamelCase , **UpperCamelCase ):
"""simple docstring"""
lowerCamelCase_ ,lowerCamelCase_ = cls.get_config_dict(UpperCamelCase , **UpperCamelCase )
if config_dict.get("model_type" ) == "bridgetower":
            lowerCamelCase_ = config_dict["vision_config"]
if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type '''
f'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
return cls.from_dict(UpperCamelCase , **UpperCamelCase )
class snake_case ( lowercase ):
"""simple docstring"""
_lowerCamelCase = "bridgetower_text_model"
def __init__( self , UpperCamelCase=5_0265 , UpperCamelCase=768 , UpperCamelCase=12 , UpperCamelCase=12 , UpperCamelCase=1 , UpperCamelCase=3072 , UpperCamelCase="gelu" , UpperCamelCase=0.1 , UpperCamelCase=0.1 , UpperCamelCase=514 , UpperCamelCase=1 , UpperCamelCase=1e-05 , UpperCamelCase=1 , UpperCamelCase=0 , UpperCamelCase=2 , UpperCamelCase="absolute" , UpperCamelCase=True , **UpperCamelCase , ):
"""simple docstring"""
super().__init__(**UpperCamelCase )
lowerCamelCase_ = vocab_size
lowerCamelCase_ = hidden_size
lowerCamelCase_ = num_hidden_layers
lowerCamelCase_ = num_attention_heads
lowerCamelCase_ = hidden_act
lowerCamelCase_ = initializer_factor
lowerCamelCase_ = intermediate_size
lowerCamelCase_ = hidden_dropout_prob
lowerCamelCase_ = attention_probs_dropout_prob
lowerCamelCase_ = max_position_embeddings
lowerCamelCase_ = type_vocab_size
lowerCamelCase_ = layer_norm_eps
lowerCamelCase_ = position_embedding_type
lowerCamelCase_ = use_cache
lowerCamelCase_ = pad_token_id
lowerCamelCase_ = bos_token_id
lowerCamelCase_ = eos_token_id
@classmethod
def snake_case ( cls , UpperCamelCase , **UpperCamelCase ):
"""simple docstring"""
lowerCamelCase_ ,lowerCamelCase_ = cls.get_config_dict(UpperCamelCase , **UpperCamelCase )
if config_dict.get("model_type" ) == "bridgetower":
lowerCamelCase_ = config_dict["text_config"]
if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type '''
f'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
return cls.from_dict(UpperCamelCase , **UpperCamelCase )
class snake_case ( lowercase ):
"""simple docstring"""
_lowerCamelCase = "bridgetower"
def __init__( self , UpperCamelCase=True , UpperCamelCase="gelu" , UpperCamelCase=768 , UpperCamelCase=1 , UpperCamelCase=1e-05 , UpperCamelCase=False , UpperCamelCase="add" , UpperCamelCase=12 , UpperCamelCase=6 , UpperCamelCase=False , UpperCamelCase=False , UpperCamelCase=None , UpperCamelCase=None , **UpperCamelCase , ):
"""simple docstring"""
# TODO: remove this once the Hub files are updated.
lowerCamelCase_ = kwargs.pop("text_config_dict" , UpperCamelCase )
lowerCamelCase_ = kwargs.pop("vision_config_dict" , UpperCamelCase )
super().__init__(**UpperCamelCase )
lowerCamelCase_ = share_cross_modal_transformer_layers
lowerCamelCase_ = hidden_act
lowerCamelCase_ = hidden_size
lowerCamelCase_ = initializer_factor
lowerCamelCase_ = layer_norm_eps
lowerCamelCase_ = share_link_tower_layers
lowerCamelCase_ = link_tower_type
lowerCamelCase_ = num_attention_heads
lowerCamelCase_ = num_hidden_layers
lowerCamelCase_ = tie_word_embeddings
lowerCamelCase_ = init_layernorm_from_vision_encoder
if text_config is None:
lowerCamelCase_ = {}
logger.info("`text_config` is `None`. Initializing the `BridgeTowerTextConfig` with default values." )
if vision_config is None:
lowerCamelCase_ = {}
logger.info("`vision_config` is `None`. Initializing the `BridgeTowerVisionConfig` with default values." )
lowerCamelCase_ = BridgeTowerTextConfig(**UpperCamelCase )
lowerCamelCase_ = BridgeTowerVisionConfig(**UpperCamelCase )
@classmethod
def snake_case ( cls , UpperCamelCase , UpperCamelCase , **UpperCamelCase ):
"""simple docstring"""
return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **UpperCamelCase )
def snake_case ( self ):
"""simple docstring"""
lowerCamelCase_ = copy.deepcopy(self.__dict__ )
lowerCamelCase_ = self.text_config.to_dict()
lowerCamelCase_ = self.vision_config.to_dict()
lowerCamelCase_ = self.__class__.model_type
return output
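# Hedged usage sketch (assumes the upstream class names BridgeTowerTextConfig,
# BridgeTowerVisionConfig and BridgeTowerConfig, which this copy renames):
#
#   text_config = BridgeTowerTextConfig()
#   vision_config = BridgeTowerVisionConfig()
#   config = BridgeTowerConfig.from_text_vision_configs(text_config, vision_config)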
| 55 |
'''simple docstring'''
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
from seqaseq_trainer import SeqaSeqTrainer
from seqaseq_training_args import SeqaSeqTrainingArguments
import transformers
from transformers import (
AutoConfig,
AutoModelForSeqaSeqLM,
AutoTokenizer,
HfArgumentParser,
MBartTokenizer,
MBartTokenizerFast,
set_seed,
)
from transformers.trainer_utils import EvaluationStrategy, is_main_process
from transformers.training_args import ParallelMode
from utils import (
SeqaSeqDataCollator,
SeqaSeqDataset,
assert_all_frozen,
build_compute_metrics_fn,
check_output_dir,
freeze_embeds,
freeze_params,
lmap,
save_json,
use_task_specific_params,
write_txt_file,
)
a_ : Optional[Any] = logging.getLogger(__name__)
@dataclass
class snake_case :
"""simple docstring"""
_lowerCamelCase = field(
metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} )
_lowerCamelCase = field(
default=lowercase , metadata={"help": "Pretrained config name or path if not the same as model_name"} )
_lowerCamelCase = field(
default=lowercase , metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} )
_lowerCamelCase = field(
default=lowercase , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} , )
_lowerCamelCase = field(default=lowercase , metadata={"help": "Whether tp freeze the encoder."} )
_lowerCamelCase = field(default=lowercase , metadata={"help": "Whether to freeze the embeddings."} )
@dataclass
class snake_case :
"""simple docstring"""
_lowerCamelCase = field(
metadata={"help": "The input data dir. Should contain the .tsv files (or other data files) for the task."} )
_lowerCamelCase = field(
default="summarization" , metadata={"help": "Task name, summarization (or summarization_{dataset} for pegasus) or translation"} , )
_lowerCamelCase = field(
default=10_24 , metadata={
"help": (
"The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
)
} , )
_lowerCamelCase = field(
default=1_28 , metadata={
"help": (
"The maximum total sequence length for target text after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
)
} , )
_lowerCamelCase = field(
default=1_42 , metadata={
"help": (
"The maximum total sequence length for validation target text after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded. "
"This argument is also used to override the ``max_length`` param of ``model.generate``, which is used "
"during ``evaluate`` and ``predict``."
)
} , )
_lowerCamelCase = field(
default=1_42 , metadata={
"help": (
"The maximum total sequence length for test target text after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
)
} , )
_lowerCamelCase = field(default=-1 , metadata={"help": "# training examples. -1 means use all."} )
_lowerCamelCase = field(default=-1 , metadata={"help": "# validation examples. -1 means use all."} )
_lowerCamelCase = field(default=-1 , metadata={"help": "# test examples. -1 means use all."} )
_lowerCamelCase = field(default=lowercase , metadata={"help": "Source language id for translation."} )
_lowerCamelCase = field(default=lowercase , metadata={"help": "Target language id for translation."} )
_lowerCamelCase = field(default=lowercase , metadata={"help": "# num_beams to use for evaluation."} )
_lowerCamelCase = field(
default=lowercase , metadata={"help": "If only pad tokens should be ignored. This assumes that `config.pad_token_id` is defined."} , )
def handle_metrics(split, metrics, output_dir):
    """Log and save metrics for a given split (train/val/test)."""
    logger.info(f"***** {split} metrics *****")
    for key in sorted(metrics.keys()):
        logger.info(f"  {key} = {metrics[key]}")
    save_json(metrics, os.path.join(output_dir, f"{split}_results.json"))
def main():
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
lowerCamelCase_ = HfArgumentParser((ModelArguments, DataTrainingArguments, SeqaSeqTrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith(".json" ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ = parser.parse_args_into_dataclasses()
check_output_dir(UpperCAmelCase_ )
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
logger.warning(
"Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s" , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.parallel_mode == ParallelMode.DISTRIBUTED ) , training_args.fpaa , )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
logger.info("Training/evaluation parameters %s" , UpperCAmelCase_ )
# Set seed
set_seed(training_args.seed )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
lowerCamelCase_ = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
lowerCamelCase_ = ("encoder_layerdrop", "decoder_layerdrop", "dropout", "attention_dropout")
for p in extra_model_params:
if getattr(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ):
assert hasattr(UpperCAmelCase_ , UpperCAmelCase_ ), F'''({config.__class__.__name__}) doesn\'t have a `{p}` attribute'''
setattr(UpperCAmelCase_ , UpperCAmelCase_ , getattr(UpperCAmelCase_ , UpperCAmelCase_ ) )
lowerCamelCase_ = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
lowerCamelCase_ = AutoModelForSeqaSeqLM.from_pretrained(
model_args.model_name_or_path , from_tf=".ckpt" in model_args.model_name_or_path , config=UpperCAmelCase_ , cache_dir=model_args.cache_dir , )
# use task specific params
use_task_specific_params(UpperCAmelCase_ , data_args.task )
# set num_beams for evaluation
if data_args.eval_beams is None:
lowerCamelCase_ = model.config.num_beams
# set decoder_start_token_id for MBart
if model.config.decoder_start_token_id is None and isinstance(UpperCAmelCase_ , (MBartTokenizer, MBartTokenizerFast) ):
assert (
data_args.tgt_lang is not None and data_args.src_lang is not None
), "mBart requires --tgt_lang and --src_lang"
if isinstance(UpperCAmelCase_ , UpperCAmelCase_ ):
lowerCamelCase_ = tokenizer.lang_code_to_id[data_args.tgt_lang]
else:
lowerCamelCase_ = tokenizer.convert_tokens_to_ids(data_args.tgt_lang )
if model_args.freeze_embeds:
freeze_embeds(UpperCAmelCase_ )
if model_args.freeze_encoder:
freeze_params(model.get_encoder() )
assert_all_frozen(model.get_encoder() )
lowerCamelCase_ = SeqaSeqDataset
# Get datasets
lowerCamelCase_ = (
dataset_class(
UpperCAmelCase_ , type_path="train" , data_dir=data_args.data_dir , n_obs=data_args.n_train , max_target_length=data_args.max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or "" , )
if training_args.do_train
else None
)
lowerCamelCase_ = (
dataset_class(
UpperCAmelCase_ , type_path="val" , data_dir=data_args.data_dir , n_obs=data_args.n_val , max_target_length=data_args.val_max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or "" , )
if training_args.do_eval or training_args.evaluation_strategy != EvaluationStrategy.NO
else None
)
lowerCamelCase_ = (
dataset_class(
UpperCAmelCase_ , type_path="test" , data_dir=data_args.data_dir , n_obs=data_args.n_test , max_target_length=data_args.test_max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or "" , )
if training_args.do_predict
else None
)
# Initialize our Trainer
lowerCamelCase_ = (
build_compute_metrics_fn(data_args.task , UpperCAmelCase_ ) if training_args.predict_with_generate else None
)
lowerCamelCase_ = SeqaSeqTrainer(
model=UpperCAmelCase_ , args=UpperCAmelCase_ , data_args=UpperCAmelCase_ , train_dataset=UpperCAmelCase_ , eval_dataset=UpperCAmelCase_ , data_collator=SeqaSeqDataCollator(
UpperCAmelCase_ , UpperCAmelCase_ , model.config.decoder_start_token_id , training_args.tpu_num_cores ) , compute_metrics=UpperCAmelCase_ , tokenizer=UpperCAmelCase_ , )
lowerCamelCase_ = {}
# Training
if training_args.do_train:
logger.info("*** Train ***" )
lowerCamelCase_ = trainer.train(
model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
lowerCamelCase_ = train_result.metrics
lowerCamelCase_ = data_args.n_train
trainer.save_model() # this also saves the tokenizer
if trainer.is_world_process_zero():
handle_metrics("train" , UpperCAmelCase_ , training_args.output_dir )
all_metrics.update(UpperCAmelCase_ )
# Need to save the state, since Trainer.save_model saves only the tokenizer with the model
trainer.state.save_to_json(os.path.join(training_args.output_dir , "trainer_state.json" ) )
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
if training_args.do_eval:
logger.info("*** Evaluate ***" )
lowerCamelCase_ = trainer.evaluate(metric_key_prefix="val" )
lowerCamelCase_ = data_args.n_val
lowerCamelCase_ = round(metrics["val_loss"] , 4 )
if trainer.is_world_process_zero():
handle_metrics("val" , UpperCAmelCase_ , training_args.output_dir )
all_metrics.update(UpperCAmelCase_ )
if training_args.do_predict:
logger.info("*** Predict ***" )
lowerCamelCase_ = trainer.predict(test_dataset=UpperCAmelCase_ , metric_key_prefix="test" )
lowerCamelCase_ = test_output.metrics
lowerCamelCase_ = data_args.n_test
if trainer.is_world_process_zero():
lowerCamelCase_ = round(metrics["test_loss"] , 4 )
handle_metrics("test" , UpperCAmelCase_ , training_args.output_dir )
all_metrics.update(UpperCAmelCase_ )
if training_args.predict_with_generate:
lowerCamelCase_ = tokenizer.batch_decode(
test_output.predictions , skip_special_tokens=UpperCAmelCase_ , clean_up_tokenization_spaces=UpperCAmelCase_ )
lowerCamelCase_ = lmap(str.strip , UpperCAmelCase_ )
write_txt_file(UpperCAmelCase_ , os.path.join(training_args.output_dir , "test_generations.txt" ) )
if trainer.is_world_process_zero():
save_json(UpperCAmelCase_ , os.path.join(training_args.output_dir , "all_results.json" ) )
return all_metrics
def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
    main()
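# Usage sketch (illustrative arguments; the script file name is an assumption):
#   python finetune_trainer.py --model_name_or_path t5-small --data_dir ./data \
#       --output_dir ./out --do_train --do_eval --predict_with_generate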
| 55 | 1 |
"""simple docstring"""
import argparse
import torch
from transformers import LxmertConfig, LxmertForPreTraining, load_tf_weights_in_lxmert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = LxmertConfig.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = LxmertForPreTraining(config)
    # Load weights from tf checkpoint
    load_tf_weights_in_lxmert(model, config, tf_checkpoint_path)
    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
    )
    parser.add_argument(
        '--config_file',
        default=None,
        type=str,
        required=True,
        help='The config json file corresponding to the pre-trained model. \nThis specifies the model architecture.',
    )
    parser.add_argument(
        '--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
    )
    args = parser.parse_args()
    convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
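# Usage sketch (placeholder paths; the script file name is an assumption):
#   python convert_lxmert_original_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path ./model.ckpt --config_file ./config.json \
#       --pytorch_dump_path ./pytorch_model.bin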
| 69 |
"""simple docstring"""
import unittest
from transformers import DebertaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DebertaForMaskedLM,
DebertaForQuestionAnswering,
DebertaForSequenceClassification,
DebertaForTokenClassification,
DebertaModel,
)
from transformers.models.deberta.modeling_deberta import DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST
class a ( _lowerCamelCase ):
"""simple docstring"""
def __init__( self: int , UpperCamelCase: int , UpperCamelCase: Union[str, Any]=13 , UpperCamelCase: List[Any]=7 , UpperCamelCase: Any=True , UpperCamelCase: Optional[Any]=True , UpperCamelCase: Optional[Any]=True , UpperCamelCase: str=True , UpperCamelCase: Optional[int]=99 , UpperCamelCase: Optional[Any]=32 , UpperCamelCase: Tuple=5 , UpperCamelCase: Optional[int]=4 , UpperCamelCase: int=37 , UpperCamelCase: str="gelu" , UpperCamelCase: Optional[Any]=0.1 , UpperCamelCase: List[Any]=0.1 , UpperCamelCase: Tuple=5_12 , UpperCamelCase: List[str]=16 , UpperCamelCase: List[str]=2 , UpperCamelCase: List[Any]=0.02 , UpperCamelCase: List[str]=False , UpperCamelCase: int=True , UpperCamelCase: Union[str, Any]="None" , UpperCamelCase: Optional[int]=3 , UpperCamelCase: List[str]=4 , UpperCamelCase: List[str]=None , ):
"""simple docstring"""
A__ = parent
A__ = batch_size
A__ = seq_length
A__ = is_training
A__ = use_input_mask
A__ = use_token_type_ids
A__ = use_labels
A__ = vocab_size
A__ = hidden_size
A__ = num_hidden_layers
A__ = num_attention_heads
A__ = intermediate_size
A__ = hidden_act
A__ = hidden_dropout_prob
A__ = attention_probs_dropout_prob
A__ = max_position_embeddings
A__ = type_vocab_size
A__ = type_sequence_label_size
A__ = initializer_range
A__ = num_labels
A__ = num_choices
A__ = relative_attention
A__ = position_biased_input
A__ = pos_att_type
A__ = scope
def UpperCamelCase ( self: Tuple ):
"""simple docstring"""
A__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
A__ = None
if self.use_input_mask:
A__ = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
A__ = None
if self.use_token_type_ids:
A__ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
A__ = None
A__ = None
A__ = None
if self.use_labels:
A__ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
A__ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
A__ = ids_tensor([self.batch_size] , self.num_choices )
A__ = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def UpperCamelCase ( self: str ):
"""simple docstring"""
return DebertaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , relative_attention=self.relative_attention , position_biased_input=self.position_biased_input , pos_att_type=self.pos_att_type , )
def UpperCamelCase ( self: Optional[int] ):
"""simple docstring"""
A__ = self.get_config()
A__ = 3_00
return config
def UpperCamelCase ( self: List[Any] , UpperCamelCase: str ):
"""simple docstring"""
self.parent.assertListEqual(list(result.loss.size() ) , [] )
def UpperCamelCase ( self: Tuple , UpperCamelCase: List[Any] , UpperCamelCase: List[str] , UpperCamelCase: List[Any] , UpperCamelCase: List[str] , UpperCamelCase: List[str] , UpperCamelCase: Optional[Any] , UpperCamelCase: Tuple ):
"""simple docstring"""
A__ = DebertaModel(config=UpperCamelCase )
model.to(UpperCamelCase )
model.eval()
A__ = model(UpperCamelCase , attention_mask=UpperCamelCase , token_type_ids=UpperCamelCase )[0]
A__ = model(UpperCamelCase , token_type_ids=UpperCamelCase )[0]
A__ = model(UpperCamelCase )[0]
self.parent.assertListEqual(list(sequence_output.size() ) , [self.batch_size, self.seq_length, self.hidden_size] )
def UpperCamelCase ( self: List[str] , UpperCamelCase: Optional[int] , UpperCamelCase: Optional[Any] , UpperCamelCase: Union[str, Any] , UpperCamelCase: Tuple , UpperCamelCase: Tuple , UpperCamelCase: str , UpperCamelCase: Any ):
"""simple docstring"""
A__ = DebertaForMaskedLM(config=UpperCamelCase )
model.to(UpperCamelCase )
model.eval()
A__ = model(UpperCamelCase , attention_mask=UpperCamelCase , token_type_ids=UpperCamelCase , labels=UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def UpperCamelCase ( self: Union[str, Any] , UpperCamelCase: Union[str, Any] , UpperCamelCase: List[Any] , UpperCamelCase: int , UpperCamelCase: Dict , UpperCamelCase: Dict , UpperCamelCase: Tuple , UpperCamelCase: str ):
"""simple docstring"""
A__ = self.num_labels
A__ = DebertaForSequenceClassification(UpperCamelCase )
model.to(UpperCamelCase )
model.eval()
A__ = model(UpperCamelCase , attention_mask=UpperCamelCase , token_type_ids=UpperCamelCase , labels=UpperCamelCase )
self.parent.assertListEqual(list(result.logits.size() ) , [self.batch_size, self.num_labels] )
self.check_loss_output(UpperCamelCase )
def UpperCamelCase ( self: Optional[int] , UpperCamelCase: List[Any] , UpperCamelCase: Any , UpperCamelCase: Dict , UpperCamelCase: int , UpperCamelCase: Optional[Any] , UpperCamelCase: str , UpperCamelCase: int ):
"""simple docstring"""
A__ = self.num_labels
A__ = DebertaForTokenClassification(config=UpperCamelCase )
model.to(UpperCamelCase )
model.eval()
A__ = model(UpperCamelCase , attention_mask=UpperCamelCase , token_type_ids=UpperCamelCase , labels=UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def UpperCamelCase ( self: Tuple , UpperCamelCase: Tuple , UpperCamelCase: Optional[int] , UpperCamelCase: Tuple , UpperCamelCase: int , UpperCamelCase: int , UpperCamelCase: Tuple , UpperCamelCase: Any ):
"""simple docstring"""
A__ = DebertaForQuestionAnswering(config=UpperCamelCase )
model.to(UpperCamelCase )
model.eval()
A__ = model(
UpperCamelCase , attention_mask=UpperCamelCase , token_type_ids=UpperCamelCase , start_positions=UpperCamelCase , end_positions=UpperCamelCase , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
    def prepare_config_and_inputs_for_common(self):
        """simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class a ( _lowerCamelCase, _lowerCamelCase, unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase = (
(
DebertaModel,
DebertaForMaskedLM,
DebertaForSequenceClassification,
DebertaForTokenClassification,
DebertaForQuestionAnswering,
)
if is_torch_available()
else ()
)
UpperCAmelCase = (
{
"feature-extraction": DebertaModel,
"fill-mask": DebertaForMaskedLM,
"question-answering": DebertaForQuestionAnswering,
"text-classification": DebertaForSequenceClassification,
"token-classification": DebertaForTokenClassification,
"zero-shot": DebertaForSequenceClassification,
}
if is_torch_available()
else {}
)
UpperCAmelCase = True
UpperCAmelCase = False
UpperCAmelCase = False
UpperCAmelCase = False
UpperCAmelCase = False
def UpperCamelCase ( self: Union[str, Any] ):
"""simple docstring"""
A__ = DebertaModelTester(self )
A__ = ConfigTester(self , config_class=UpperCamelCase , hidden_size=37 )
def UpperCamelCase ( self: int ):
"""simple docstring"""
self.config_tester.run_common_tests()
def UpperCamelCase ( self: List[str] ):
"""simple docstring"""
A__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_model(*UpperCamelCase )
def UpperCamelCase ( self: str ):
"""simple docstring"""
A__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_sequence_classification(*UpperCamelCase )
def UpperCamelCase ( self: List[Any] ):
"""simple docstring"""
A__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_masked_lm(*UpperCamelCase )
def UpperCamelCase ( self: Optional[int] ):
"""simple docstring"""
A__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_question_answering(*UpperCamelCase )
def UpperCamelCase ( self: Tuple ):
"""simple docstring"""
A__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_token_classification(*UpperCamelCase )
@slow
def UpperCamelCase ( self: Union[str, Any] ):
"""simple docstring"""
for model_name in DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A__ = DebertaModel.from_pretrained(UpperCamelCase )
self.assertIsNotNone(UpperCamelCase )
@require_torch
@require_sentencepiece
@require_tokenizers
class a ( unittest.TestCase ):
"""simple docstring"""
@unittest.skip(reason="""Model not available yet""" )
def UpperCamelCase ( self: Any ):
"""simple docstring"""
pass
@slow
def UpperCamelCase ( self: Optional[Any] ):
"""simple docstring"""
A__ = DebertaModel.from_pretrained("""microsoft/deberta-base""" )
A__ = torch.tensor([[0, 3_14_14, 2_32, 3_28, 7_40, 11_40, 1_26_95, 69, 4_60_78, 15_88, 2]] )
A__ = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
with torch.no_grad():
A__ = model(UpperCamelCase , attention_mask=UpperCamelCase )[0]
# compare the actual values for a slice.
A__ = torch.tensor(
[[[-0.5_986, -0.8_055, -0.8_462], [1.4_484, -0.9_348, -0.8_059], [0.3_123, 0.0_032, -1.4_131]]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , UpperCamelCase , atol=1e-4 ) , f"""{output[:, 1:4, 1:4]}""" )
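# Usage sketch (hypothetical file name): the fast tests run with plain pytest,
# and the @slow integration test additionally needs RUN_SLOW=1:
#   RUN_SLOW=1 python -m pytest test_modeling_deberta.py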
| 69 | 1 |
'''simple docstring'''
def min_path_sum(grid: list) -> int:
    """Return the lowest possible sum along a top-left to bottom-right path."""
    if not grid or not grid[0]:
        raise TypeError("The grid does not contain the appropriate information")
    for cell_n in range(1, len(grid[0])):
        grid[0][cell_n] += grid[0][cell_n - 1]
    row_above = grid[0]
    for row_n in range(1, len(grid)):
        current_row = grid[row_n]
        grid[row_n] = fill_row(current_row, row_above)
        row_above = grid[row_n]
    return grid[-1][-1]
def fill_row(current_row: list, row_above: list) -> list:
    current_row[0] += row_above[0]
    for cell_n in range(1, len(current_row)):
        current_row[cell_n] += min(current_row[cell_n - 1], row_above[cell_n])
    return current_row
if __name__ == "__main__":
    import doctest
    doctest.testmod()
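    # Illustrative check (not from the original file): the cheapest path through
    # this classic example grid is 1 -> 3 -> 1 -> 1 -> 1 = 7.
    print(min_path_sum([[1, 3, 1], [1, 5, 1], [4, 2, 1]]))  # expected: 7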
 | 79 |
"""simple docstring"""
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarely `from diffusers.pipelines import DiffusionPipeline` works
from ...utils import deprecate
from ..controlnet.multicontrolnet import MultiControlNetModel # noqa: F401
from ..controlnet.pipeline_controlnet import StableDiffusionControlNetPipeline # noqa: F401
deprecate(
'stable diffusion controlnet',
'0.22.0',
'Importing `StableDiffusionControlNetPipeline` or `MultiControlNetModel` from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet is deprecated. Please import `from diffusers import StableDiffusionControlNetPipeline` instead.',
standard_warn=False,
stacklevel=3,
)
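# Migration sketch, mirroring the deprecation message above:
#   from diffusers import StableDiffusionControlNetPipeline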
| 44 | 0 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowerCAmelCase = logging.get_logger(__name__)
__lowerCAmelCase = {
"""alibaba-damo/mgp-str-base""": """https://huggingface.co/alibaba-damo/mgp-str-base/resolve/main/config.json""",
}
class UpperCAmelCase__ ( lowercase__ ):
"""simple docstring"""
__UpperCAmelCase : Optional[Any] = '''mgp-str'''
def __init__( self : str ,_a : Any=[32, 128] ,_a : Any=4 ,_a : int=3 ,_a : int=27 ,_a : Union[str, Any]=38 ,_a : List[Any]=5_0257 ,_a : str=3_0522 ,_a : Any=768 ,_a : Union[str, Any]=12 ,_a : Any=12 ,_a : int=4.0 ,_a : Dict=True ,_a : Any=False ,_a : Any=1E-5 ,_a : Optional[Any]=0.0 ,_a : Dict=0.0 ,_a : List[Any]=0.0 ,_a : Optional[int]=False ,_a : Optional[Any]=0.02 ,**_a : List[str] ,):
'''simple docstring'''
super().__init__(**_a )
_a : List[Any] = image_size
_a : List[str] = patch_size
_a : str = num_channels
_a : Optional[Any] = max_token_length
_a : Dict = num_character_labels
_a : Union[str, Any] = num_bpe_labels
_a : Tuple = num_wordpiece_labels
_a : int = hidden_size
_a : int = num_hidden_layers
_a : Union[str, Any] = num_attention_heads
_a : Optional[int] = mlp_ratio
_a : List[str] = distilled
_a : str = layer_norm_eps
_a : int = drop_rate
_a : List[Any] = qkv_bias
_a : List[Any] = attn_drop_rate
_a : Dict = drop_path_rate
_a : Union[str, Any] = output_aa_attentions
_a : Optional[int] = initializer_range
| 5 |
'''simple docstring'''
import argparse
from collections import defaultdict
import yaml
__lowerCAmelCase = """docs/source/en/_toctree.yml"""
def clean_model_doc_toc(model_doc):
    """Cleans the table of content of the model documentation by removing duplicates and sorting models alphabetically."""
    counts = defaultdict(int)
    for doc in model_doc:
        counts[doc["local"]] += 1
    duplicates = [key for key, value in counts.items() if value > 1]
    new_doc = []
    for duplicate_key in duplicates:
        titles = list({doc['title'] for doc in model_doc if doc['local'] == duplicate_key})
        if len(titles) > 1:
            raise ValueError(
                f"""{duplicate_key} is present several times in the documentation table of content at """
                '`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the '
                'others.' )
        # Only add this once
        new_doc.append({'local': duplicate_key, 'title': titles[0]})
    # Add none duplicate-keys
    new_doc.extend([doc for doc in model_doc if counts[doc['local']] == 1])
    # Sort
    return sorted(new_doc, key=lambda s: s["title"].lower())
def check_model_doc(overwrite=False):
    with open(PATH_TO_TOC, encoding='utf-8') as f:
        content = yaml.safe_load(f.read())
    # Get to the API doc
    api_idx = 0
    while content[api_idx]["title"] != "API":
        api_idx += 1
    api_doc = content[api_idx]['sections']
    # Then to the model doc
    model_idx = 0
    while api_doc[model_idx]["title"] != "Models":
        model_idx += 1
    model_doc = api_doc[model_idx]['sections']
    # Extract the modalities and clean them one by one
    modalities_docs = [(idx, section) for idx, section in enumerate(model_doc) if 'sections' in section]
    diff = False
    for idx, modality_doc in modalities_docs:
        old_modality_doc = modality_doc['sections']
        new_modality_doc = clean_model_doc_toc(old_modality_doc)
        if old_modality_doc != new_modality_doc:
            diff = True
            if overwrite:
                model_doc[idx]['sections'] = new_modality_doc
    if diff:
        if overwrite:
            api_doc[model_idx]['sections'] = model_doc
            content[api_idx]['sections'] = api_doc
            with open(PATH_TO_TOC, 'w', encoding='utf-8') as f:
                f.write(yaml.dump(content, allow_unicode=True))
        else:
            raise ValueError(
                'The model doc part of the table of content is not properly sorted, run `make style` to fix this.' )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("""--fix_and_overwrite""", action="""store_true""", help="""Whether to fix inconsistencies.""")
    args = parser.parse_args()
    check_model_doc(args.fix_and_overwrite)
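# Behavior sketch (toy data, not from the original file): two entries sharing a
# "local" key with one distinct title collapse into a single entry, and the
# result is sorted case-insensitively by title:
#   clean_model_doc_toc([{"local": "model_doc/bert", "title": "BERT"},
#                        {"local": "model_doc/bert", "title": "BERT"}])
#   -> [{"local": "model_doc/bert", "title": "BERT"}]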
| 5 | 1 |
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from argparse import ArgumentParser
from accelerate.commands.config import get_config_parser
from accelerate.commands.env import env_command_parser
from accelerate.commands.launch import launch_command_parser
from accelerate.commands.test import test_command_parser
from accelerate.commands.tpu import tpu_command_parser
def main():
    '''simple docstring'''
    parser = ArgumentParser('''Accelerate CLI tool''' , usage='''accelerate <command> [<args>]''' , allow_abbrev=False )
    subparsers = parser.add_subparsers(help='''accelerate command helpers''' )
    # Register commands
    get_config_parser(subparsers=subparsers)
    env_command_parser(subparsers=subparsers)
    launch_command_parser(subparsers=subparsers)
    tpu_command_parser(subparsers=subparsers)
    test_command_parser(subparsers=subparsers)
    # Let's go
    args = parser.parse_args()
    if not hasattr(args , '''func''' ):
        parser.print_help()
        exit(1 )
    # Run
    args.func(args)
if __name__ == "__main__":
    main()
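# Usage sketch: once the package is installed, the `accelerate` console command
# dispatches to main() above, e.g.
#   accelerate config
#   accelerate launch train.py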
| 30 |
import argparse
import torch
from datasets import load_dataset
from donut import DonutModel
from transformers import (
DonutImageProcessor,
DonutProcessor,
DonutSwinConfig,
DonutSwinModel,
MBartConfig,
MBartForCausalLM,
VisionEncoderDecoderModel,
XLMRobertaTokenizerFast,
)
def get_configs(model):
    """simple docstring"""
    original_config = model.config
    encoder_config = DonutSwinConfig(
        image_size=original_config.input_size, patch_size=4, depths=original_config.encoder_layer,
        num_heads=[4, 8, 16, 32], window_size=original_config.window_size, embed_dim=128,
    )
    decoder_config = MBartConfig(
        is_decoder=True, is_encoder_decoder=False, add_cross_attention=True,
        decoder_layers=original_config.decoder_layer, max_position_embeddings=original_config.max_position_embeddings,
        vocab_size=len(model.decoder.tokenizer), scale_embedding=True, add_final_layer_norm=True,
    )
    return encoder_config, decoder_config
def rename_key(name):
    """simple docstring"""
    if "encoder.model" in name:
        name = name.replace("encoder.model" , "encoder" )
    if "decoder.model" in name:
        name = name.replace("decoder.model" , "decoder" )
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj" , "embeddings.patch_embeddings.projection" )
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm" , "embeddings.norm" )
    if name.startswith("encoder" ):
        if "layers" in name:
            name = "encoder." + name
        if "attn.proj" in name:
            name = name.replace("attn.proj" , "attention.output.dense" )
        if "attn" in name and "mask" not in name:
            name = name.replace("attn" , "attention.self" )
        if "norm1" in name:
            name = name.replace("norm1" , "layernorm_before" )
        if "norm2" in name:
            name = name.replace("norm2" , "layernorm_after" )
        if "mlp.fc1" in name:
            name = name.replace("mlp.fc1" , "intermediate.dense" )
        if "mlp.fc2" in name:
            name = name.replace("mlp.fc2" , "output.dense" )
        if name == "encoder.norm.weight":
            name = "encoder.layernorm.weight"
        if name == "encoder.norm.bias":
            name = "encoder.layernorm.bias"
    return name
def convert_state_dict(orig_state_dict, model):
"""simple docstring"""
for key in orig_state_dict.copy().keys():
a = orig_state_dict.pop(A )
if "qkv" in key:
a = key.split("." )
a = int(key_split[3] )
a = int(key_split[5] )
a = model.encoder.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size
if "weight" in key:
a = val[:dim, :]
a = val[dim : dim * 2, :]
a = val[-dim:, :]
else:
a = val[:dim]
a = val[dim : dim * 2]
a = val[-dim:]
elif "attn_mask" in key or key in ["encoder.model.norm.weight", "encoder.model.norm.bias"]:
# HuggingFace implementation doesn't use attn_mask buffer
# and model doesn't use final LayerNorms for the encoder
pass
else:
a = val
return orig_state_dict
def convert_donut_checkpoint(model_name, pytorch_dump_folder_path=None, push_to_hub=False):
"""simple docstring"""
a = DonutModel.from_pretrained(A ).eval()
# load HuggingFace model
a , a = get_configs(A )
a = DonutSwinModel(A )
a = MBartForCausalLM(A )
a = VisionEncoderDecoderModel(encoder=A , decoder=A )
model.eval()
a = original_model.state_dict()
a = convert_state_dict(A , A )
model.load_state_dict(A )
# verify results on scanned document
a = load_dataset("hf-internal-testing/example-documents" )
a = dataset["test"][0]["image"].convert("RGB" )
a = XLMRobertaTokenizerFast.from_pretrained(A , from_slow=A )
a = DonutImageProcessor(
do_align_long_axis=original_model.config.align_long_axis , size=original_model.config.input_size[::-1] )
a = DonutProcessor(A , A )
a = processor(A , return_tensors="pt" ).pixel_values
if model_name == "naver-clova-ix/donut-base-finetuned-docvqa":
a = "<s_docvqa><s_question>{user_input}</s_question><s_answer>"
a = "When is the coffee break?"
a = task_prompt.replace("{user_input}" , A )
elif model_name == "naver-clova-ix/donut-base-finetuned-rvlcdip":
a = "<s_rvlcdip>"
elif model_name in [
"naver-clova-ix/donut-base-finetuned-cord-v1",
"naver-clova-ix/donut-base-finetuned-cord-v1-2560",
]:
a = "<s_cord>"
elif model_name == "naver-clova-ix/donut-base-finetuned-cord-v2":
a = "s_cord-v2>"
elif model_name == "naver-clova-ix/donut-base-finetuned-zhtrainticket":
a = "<s_zhtrainticket>"
elif model_name in ["naver-clova-ix/donut-proto", "naver-clova-ix/donut-base"]:
# use a random prompt
a = "hello world"
else:
raise ValueError("Model name not supported" )
a = original_model.decoder.tokenizer(A , add_special_tokens=A , return_tensors="pt" )[
"input_ids"
]
a = original_model.encoder.model.patch_embed(A )
a , a = model.encoder.embeddings(A )
assert torch.allclose(A , A , atol=1e-3 )
# verify encoder hidden states
a = original_model.encoder(A )
a = model.encoder(A ).last_hidden_state
assert torch.allclose(A , A , atol=1e-2 )
# verify decoder hidden states
a = original_model(A , A , A ).logits
a = model(A , decoder_input_ids=A ).logits
assert torch.allclose(A , A , atol=1e-3 )
print("Looks ok!" )
if pytorch_dump_folder_path is not None:
print(f'''Saving model and processor to {pytorch_dump_folder_path}''' )
model.save_pretrained(A )
processor.save_pretrained(A )
if push_to_hub:
model.push_to_hub("nielsr/" + model_name.split("/" )[-1] , commit_message="Update model" )
processor.push_to_hub("nielsr/" + model_name.split("/" )[-1] , commit_message="Update model" )
if __name__ == "__main__":
_lowercase: Tuple = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="naver-clova-ix/donut-base-finetuned-docvqa",
required=False,
type=str,
help="Name of the original model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path",
default=None,
required=False,
type=str,
help="Path to the output PyTorch model directory.",
)
parser.add_argument(
"--push_to_hub",
action="store_true",
help="Whether or not to push the converted model and processor to the 🤗 hub.",
)
_lowercase: Optional[Any] = parser.parse_args()
convert_donut_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
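# Usage sketch (hypothetical script name, placeholder output path):
#   python convert_donut_to_pytorch.py --model_name naver-clova-ix/donut-base \
#       --pytorch_dump_folder_path ./donut-base-hf --push_to_hub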
| 227 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_upernet": ["UperNetConfig"],
}
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_upernet"] = [
        "UperNetForSemanticSegmentation",
        "UperNetPreTrainedModel",
    ]
if TYPE_CHECKING:
    from .configuration_upernet import UperNetConfig
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_upernet import UperNetForSemanticSegmentation, UperNetPreTrainedModel
else:
    import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
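# Usage sketch (assumes this file lives at transformers/models/upernet/__init__.py):
# thanks to the _LazyModule indirection above, a consumer can write
#   from transformers.models.upernet import UperNetForSemanticSegmentation
# and the torch-dependent module is only imported on first attribute access.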
| 368 |
import inspect
import unittest
from transformers import RegNetConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from transformers.utils import cached_property, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.models.regnet.modeling_flax_regnet import FlaxRegNetForImageClassification, FlaxRegNetModel
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class A_ ( unittest.TestCase ):
'''simple docstring'''
def __init__( self: int , a: Optional[Any] , a: Optional[Any]=3 , a: List[str]=32 , a: Optional[int]=3 , a: Any=10 , a: List[str]=[10, 20, 30, 40] , a: Any=[1, 1, 2, 1] , a: Optional[int]=True , a: List[str]=True , a: Tuple="relu" , a: List[Any]=3 , a: List[Any]=None , ):
__lowerCamelCase : Union[str, Any] = parent
__lowerCamelCase : Any = batch_size
__lowerCamelCase : List[str] = image_size
__lowerCamelCase : Tuple = num_channels
__lowerCamelCase : int = embeddings_size
__lowerCamelCase : Optional[int] = hidden_sizes
__lowerCamelCase : Optional[Any] = depths
__lowerCamelCase : Optional[int] = is_training
__lowerCamelCase : List[str] = use_labels
__lowerCamelCase : Dict = hidden_act
__lowerCamelCase : Union[str, Any] = num_labels
__lowerCamelCase : Tuple = scope
__lowerCamelCase : Union[str, Any] = len(a )
def _snake_case ( self: int ):
__lowerCamelCase : Optional[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__lowerCamelCase : List[str] = self.get_config()
return config, pixel_values
def _snake_case ( self: List[str] ):
return RegNetConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , image_size=self.image_size , )
def _snake_case ( self: Tuple , a: Optional[int] , a: int ):
__lowerCamelCase : Optional[Any] = FlaxRegNetModel(config=a )
__lowerCamelCase : List[str] = model(a )
# Output shape (b, c, h, w)
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def _snake_case ( self: Optional[int] , a: List[Any] , a: List[Any] ):
__lowerCamelCase : Tuple = self.num_labels
__lowerCamelCase : Union[str, Any] = FlaxRegNetForImageClassification(config=a )
__lowerCamelCase : List[Any] = model(a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _snake_case ( self: Optional[Any] ):
__lowerCamelCase : Union[str, Any] = self.prepare_config_and_inputs()
__lowerCamelCase , __lowerCamelCase : str = config_and_inputs
__lowerCamelCase : Tuple = {'pixel_values': pixel_values}
return config, inputs_dict
@require_flax
class A_ ( __UpperCamelCase , unittest.TestCase ):
'''simple docstring'''
__snake_case = (FlaxRegNetModel, FlaxRegNetForImageClassification) if is_flax_available() else ()
__snake_case = False
__snake_case = False
__snake_case = False
def _snake_case ( self: Tuple ):
__lowerCamelCase : Dict = FlaxRegNetModelTester(self )
__lowerCamelCase : List[str] = ConfigTester(self , config_class=a , has_text_modality=a )
def _snake_case ( self: Union[str, Any] ):
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def _snake_case ( self: List[Any] ):
return
def _snake_case ( self: Dict ):
__lowerCamelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*a )
def _snake_case ( self: Optional[int] ):
__lowerCamelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*a )
@unittest.skip(reason='RegNet does not use inputs_embeds' )
def _snake_case ( self: Tuple ):
pass
@unittest.skip(reason='RegNet does not support input and output embeddings' )
def _snake_case ( self: str ):
pass
def _snake_case ( self: List[Any] ):
__lowerCamelCase , __lowerCamelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowerCamelCase : Optional[Any] = model_class(a )
__lowerCamelCase : Dict = inspect.signature(model.__call__ )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__lowerCamelCase : Union[str, Any] = [*signature.parameters.keys()]
__lowerCamelCase : Union[str, Any] = ['pixel_values']
self.assertListEqual(arg_names[:1] , a )
def _snake_case ( self: List[str] ):
def check_hidden_states_output(a: List[Any] , a: List[Any] , a: Union[str, Any] ):
__lowerCamelCase : str = model_class(a )
__lowerCamelCase : Optional[Any] = model(**self._prepare_for_class(a , a ) )
__lowerCamelCase : List[str] = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
__lowerCamelCase : List[str] = self.model_tester.num_stages
self.assertEqual(len(a ) , expected_num_stages + 1 )
__lowerCamelCase , __lowerCamelCase : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowerCamelCase : int = True
check_hidden_states_output(a , a , a )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__lowerCamelCase : List[Any] = True
check_hidden_states_output(a , a , a )
def _snake_case ( self: Dict ):
__lowerCamelCase , __lowerCamelCase : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
__lowerCamelCase : str = self._prepare_for_class(a , a )
__lowerCamelCase : List[str] = model_class(a )
@jax.jit
def model_jitted(a: Optional[int] , **a: str ):
return model(pixel_values=a , **a )
with self.subTest('JIT Enabled' ):
__lowerCamelCase : List[str] = model_jitted(**a ).to_tuple()
with self.subTest('JIT Disabled' ):
with jax.disable_jit():
__lowerCamelCase : Optional[int] = model_jitted(**a ).to_tuple()
self.assertEqual(len(a ) , len(a ) )
for jitted_output, output in zip(a , a ):
self.assertEqual(jitted_output.shape , output.shape )
def prepare_img():
__lowerCamelCase : List[str] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_flax
class A_ ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def _snake_case ( self: Tuple ):
return AutoImageProcessor.from_pretrained('facebook/regnet-y-040' ) if is_vision_available() else None
@slow
def _snake_case ( self: Optional[Any] ):
__lowerCamelCase : Optional[Any] = FlaxRegNetForImageClassification.from_pretrained('facebook/regnet-y-040' )
__lowerCamelCase : Tuple = self.default_image_processor
__lowerCamelCase : int = prepare_img()
__lowerCamelCase : Tuple = image_processor(images=a , return_tensors='np' )
__lowerCamelCase : List[Any] = model(**a )
# verify the logits
__lowerCamelCase : str = (1, 1000)
self.assertEqual(outputs.logits.shape , a )
__lowerCamelCase : int = jnp.array([-0.4_1_8_0, -1.5_0_5_1, -3.4_8_3_6] )
self.assertTrue(jnp.allclose(outputs.logits[0, :3] , a , atol=1e-4 ) )
| 194 | 0 |
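The jitted-versus-eager check in the RegNet test above is a general JAX pattern. A minimal self-contained sketch, assuming only `jax` is installed; the toy `forward` function is a stand-in for the real model call, not part of the test file:

```python
import jax
import jax.numpy as jnp

@jax.jit
def forward(x):
    # stand-in for model(pixel_values=...); any pure function works here
    return (x * 2.0 + 1.0).sum(axis=-1)

x = jnp.ones((2, 3))
jitted_out = forward(x)        # compiled path
with jax.disable_jit():
    eager_out = forward(x)     # eager path, same shapes and numerics expected
assert jitted_out.shape == eager_out.shape
```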
import inspect
import logging
import os
import random
import shutil
import tempfile
import unittest
import pytest
import torch
from torch import nn
from torch.utils.data import DataLoader, TensorDataset
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_cuda
from accelerate.utils import ProjectConfiguration, set_seed
__a :Optional[Any] = logging.getLogger(__name__)
def dummy_dataloaders(a=2, b=3, batch_size=16, n_train_batches: int = 10, n_valid_batches: int = 2):
    """simple docstring"""
    def get_dataset(n_batches):
        x = torch.randn(batch_size * n_batches, 1)
        return TensorDataset(x, a * x + b + 0.1 * torch.randn(batch_size * n_batches, 1))
    train_dataset = get_dataset(n_train_batches)
    valid_dataset = get_dataset(n_valid_batches)
    train_dataloader = DataLoader(train_dataset, shuffle=True, batch_size=batch_size, num_workers=4)
    valid_dataloader = DataLoader(valid_dataset, shuffle=False, batch_size=batch_size, num_workers=4)
    return (train_dataloader, valid_dataloader)
def train(num_epochs, model, dataloader, optimizer, accelerator, scheduler=None):
    """simple docstring"""
    rands = []
    for epoch in range(num_epochs):
        # Train quickly
        model.train()
        for batch in dataloader:
            x, y = batch
            outputs = model(x)
            loss = torch.nn.functional.mse_loss(outputs, y)
            accelerator.backward(loss)
            optimizer.step()
            optimizer.zero_grad()
        rands.append(random.random())  # Introduce some randomness
        if scheduler is not None:
            scheduler.step()
    return rands
class DummyModel(nn.Module):
    """simple docstring"""
    def __init__(self):
        super().__init__()
        self.a = nn.Parameter(torch.randn(1))
        self.b = nn.Parameter(torch.randn(1))

    def forward(self, x):
        return x * self.a + self.b
class _a ( unittest.TestCase ):
"""simple docstring"""
def __A ( self : List[Any] ):
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
A_ = DummyModel()
A_ = torch.optim.Adam(params=model.parameters() , lr=1E-3 )
A_ , A_ = dummy_dataloaders()
A_ = ProjectConfiguration(total_limit=1 , project_dir=SCREAMING_SNAKE_CASE_ , automatic_checkpoint_naming=SCREAMING_SNAKE_CASE_ )
# Train baseline
A_ = Accelerator(project_config=SCREAMING_SNAKE_CASE_ )
A_ , A_ , A_ , A_ = accelerator.prepare(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# Save initial
accelerator.save_state()
# Save second state
accelerator.save_state()
self.assertEqual(len(os.listdir(accelerator.project_dir ) ) , 1 )
def __A ( self : Tuple ):
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
A_ = DummyModel()
A_ = torch.optim.Adam(params=model.parameters() , lr=1E-3 )
A_ , A_ = dummy_dataloaders()
# Train baseline
A_ = Accelerator()
A_ , A_ , A_ , A_ = accelerator.prepare(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# Save initial
A_ = os.path.join(SCREAMING_SNAKE_CASE_ , "initial" )
accelerator.save_state(SCREAMING_SNAKE_CASE_ )
((A_) , (A_)) = model.a.item(), model.b.item()
A_ = optimizer.state_dict()
A_ = train(3 , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
((A_) , (A_)) = model.a.item(), model.b.item()
A_ = optimizer.state_dict()
# Train partially
set_seed(42 )
A_ = DummyModel()
A_ = torch.optim.Adam(params=model.parameters() , lr=1E-3 )
A_ , A_ = dummy_dataloaders()
A_ = Accelerator()
A_ , A_ , A_ , A_ = accelerator.prepare(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
accelerator.load_state(SCREAMING_SNAKE_CASE_ )
((A_) , (A_)) = model.a.item(), model.b.item()
A_ = optimizer.state_dict()
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
A_ = train(2 , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# Save everything
A_ = os.path.join(SCREAMING_SNAKE_CASE_ , "checkpoint" )
accelerator.save_state(SCREAMING_SNAKE_CASE_ )
# Load everything back in and make sure all states work
accelerator.load_state(SCREAMING_SNAKE_CASE_ )
test_rands += train(1 , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
((A_) , (A_)) = model.a.item(), model.b.item()
A_ = optimizer.state_dict()
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
def __A ( self : Any ):
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
A_ = DummyModel()
A_ = torch.optim.Adam(params=model.parameters() , lr=1E-3 )
A_ , A_ = dummy_dataloaders()
A_ = ProjectConfiguration(automatic_checkpoint_naming=SCREAMING_SNAKE_CASE_ )
# Train baseline
A_ = Accelerator(project_dir=SCREAMING_SNAKE_CASE_ , project_config=SCREAMING_SNAKE_CASE_ )
A_ , A_ , A_ , A_ = accelerator.prepare(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# Save initial
accelerator.save_state()
((A_) , (A_)) = model.a.item(), model.b.item()
A_ = optimizer.state_dict()
A_ = train(3 , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
((A_) , (A_)) = model.a.item(), model.b.item()
A_ = optimizer.state_dict()
# Train partially
set_seed(42 )
A_ = DummyModel()
A_ = torch.optim.Adam(params=model.parameters() , lr=1E-3 )
A_ , A_ = dummy_dataloaders()
A_ = ProjectConfiguration(iteration=1 , automatic_checkpoint_naming=SCREAMING_SNAKE_CASE_ )
A_ = Accelerator(project_dir=SCREAMING_SNAKE_CASE_ , project_config=SCREAMING_SNAKE_CASE_ )
A_ , A_ , A_ , A_ = accelerator.prepare(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
accelerator.load_state(os.path.join(SCREAMING_SNAKE_CASE_ , "checkpoints" , "checkpoint_0" ) )
((A_) , (A_)) = model.a.item(), model.b.item()
A_ = optimizer.state_dict()
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
A_ = train(2 , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# Save everything
accelerator.save_state()
# Load everything back in and make sure all states work
accelerator.load_state(os.path.join(SCREAMING_SNAKE_CASE_ , "checkpoints" , "checkpoint_1" ) )
test_rands += train(1 , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
((A_) , (A_)) = model.a.item(), model.b.item()
A_ = optimizer.state_dict()
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
def __A ( self : List[str] ):
A_ = torch.tensor([1, 2, 3] )
A_ = torch.tensor([2, 3, 4] )
A_ = DummyModel()
A_ = torch.optim.Adam(net.parameters() )
A_ = Accelerator()
with self.assertRaises(SCREAMING_SNAKE_CASE_ ) as ve:
accelerator.register_for_checkpointing(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
A_ = str(ve.exception )
self.assertTrue("Item at index 0" in message )
self.assertTrue("Item at index 1" in message )
self.assertFalse("Item at index 2" in message )
self.assertFalse("Item at index 3" in message )
def __A ( self : str ):
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
A_ = DummyModel()
A_ = torch.optim.Adam(params=model.parameters() , lr=1E-3 )
A_ = torch.optim.lr_scheduler.StepLR(SCREAMING_SNAKE_CASE_ , step_size=1 , gamma=0.99 )
A_ , A_ = dummy_dataloaders()
A_ = ProjectConfiguration(automatic_checkpoint_naming=SCREAMING_SNAKE_CASE_ )
# Train baseline
A_ = Accelerator(project_dir=SCREAMING_SNAKE_CASE_ , project_config=SCREAMING_SNAKE_CASE_ )
A_ , A_ , A_ , A_ , A_ = accelerator.prepare(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# Save initial
accelerator.save_state()
A_ = scheduler.state_dict()
train(3 , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
self.assertNotEqual(SCREAMING_SNAKE_CASE_ , scheduler.state_dict() )
# Load everything back in and make sure all states work
accelerator.load_state(os.path.join(SCREAMING_SNAKE_CASE_ , "checkpoints" , "checkpoint_0" ) )
self.assertEqual(SCREAMING_SNAKE_CASE_ , scheduler.state_dict() )
def __A ( self : List[Any] ):
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
A_ = DummyModel()
A_ = ProjectConfiguration(automatic_checkpoint_naming=SCREAMING_SNAKE_CASE_ , total_limit=2 )
# Train baseline
A_ = Accelerator(project_dir=SCREAMING_SNAKE_CASE_ , project_config=SCREAMING_SNAKE_CASE_ )
A_ = accelerator.prepare(SCREAMING_SNAKE_CASE_ )
# Save 11 states; with total_limit=2 only the last two survive:
for _ in range(11 ):
accelerator.save_state()
self.assertTrue(not os.path.exists(os.path.join(SCREAMING_SNAKE_CASE_ , "checkpoints" , "checkpoint_0" ) ) )
self.assertTrue(os.path.exists(os.path.join(SCREAMING_SNAKE_CASE_ , "checkpoints" , "checkpoint_9" ) ) )
self.assertTrue(os.path.exists(os.path.join(SCREAMING_SNAKE_CASE_ , "checkpoints" , "checkpoint_10" ) ) )
@require_cuda
def __A ( self : Dict ):
A_ = ["torchrun", f'''--nproc_per_node={torch.cuda.device_count()}''', inspect.getfile(self.__class__ )]
execute_subprocess_async(SCREAMING_SNAKE_CASE_ , env=os.environ.copy() )
if __name__ == "__main__":
    savedir = "/tmp/accelerate/state_checkpointing"
    model = DummyModel()
    optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3)
    scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=0.99)
    train_dataloader, valid_dataloader = dummy_dataloaders()
    project_config = ProjectConfiguration(automatic_checkpoint_naming=True)
    # Train baseline
    accelerator = Accelerator(project_dir=savedir, project_config=project_config, mixed_precision='no')
    if accelerator.process_index == 0:
        if os.path.exists(savedir):
            shutil.rmtree(savedir)
        os.makedirs(savedir)
    model, optimizer, train_dataloader, valid_dataloader, scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, valid_dataloader, scheduler
    )
    model, optimizer = accelerator.prepare(model, optimizer)
    train(3, model, train_dataloader, optimizer, accelerator, scheduler)
    # Check that the initial optimizer is loaded on the GPU
    for group in optimizer.param_groups:
        param_device = group["params"][0].device
        break
    assert param_device.type == accelerator.device.type
    model = model.cpu()
    accelerator.wait_for_everyone()
    accelerator.save_state()
    accelerator.wait_for_everyone()
    # Check CPU state
    accelerator.load_state(os.path.join(savedir, 'checkpoints', 'checkpoint_0'), map_location='cpu')
    for group in optimizer.param_groups:
        param_device = group["params"][0].device
        break
    assert (
        param_device.type == torch.device('cpu').type
    ), F"Loaded optimizer states did not match, expected to be loaded on the CPU but got {param_device}"
    # Check device state
    model.to(accelerator.device)
    accelerator.load_state(os.path.join(savedir, 'checkpoints', 'checkpoint_0'), map_location='on_device')
    for group in optimizer.param_groups:
        param_device = group["params"][0].device
        break
    assert (
        param_device.type == accelerator.device.type
    ), F"Loaded optimizer states did not match, expected to be loaded on {accelerator.device} but got {param_device}"
    # Check error
    with pytest.raises(TypeError, match='Unsupported optimizer map location passed'):
        accelerator.load_state(os.path.join(savedir, 'checkpoints', 'checkpoint_0'), map_location='invalid')
    accelerator.wait_for_everyone()
    if accelerator.process_index == 0:
        shutil.rmtree(savedir)
    accelerator.wait_for_everyone()
| 312 |
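For orientation, the save/load round trip these tests exercise reduces to a few calls. A minimal sketch assuming a single-process CPU run with `accelerate` installed; the toy linear model and `tmpdir` are local to the sketch:

```python
import tempfile

import torch
from accelerate import Accelerator

accelerator = Accelerator()
model = torch.nn.Linear(1, 1)
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
model, optimizer = accelerator.prepare(model, optimizer)

with tempfile.TemporaryDirectory() as tmpdir:
    accelerator.save_state(tmpdir)  # snapshot model/optimizer/RNG state
    before = accelerator.unwrap_model(model).weight.detach().clone()
    with torch.no_grad():
        accelerator.unwrap_model(model).weight.add_(1.0)  # perturb the weights
    accelerator.load_state(tmpdir)  # restore the snapshot
    after = accelerator.unwrap_model(model).weight.detach()
    assert torch.equal(before, after)
```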
import math
def perfect_square(num: int) -> bool:
    """Check via floating-point sqrt; can misbehave for very large num."""
    return math.sqrt(num) * math.sqrt(num) == num


def perfect_square_binary_search(n: int) -> bool:
    """Exact integer check via binary search on the square root."""
    left = 0
    right = n
    while left <= right:
        mid = (left + right) // 2
        if mid**2 == n:
            return True
        elif mid**2 > n:
            right = mid - 1
        else:
            left = mid + 1
    return False
if __name__ == "__main__":
import doctest
doctest.testmod()
| 328 | 0 |
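As a cross-check of the two predicates above, `math.isqrt` gives an exact integer test that avoids the float-precision pitfall of the `math.sqrt` version on very large inputs. A small sketch; `perfect_square_binary_search` is the name restored in the snippet above:

```python
import math

def is_perfect_square_isqrt(n: int) -> bool:
    # exact integer arithmetic; safe for arbitrarily large n
    return n >= 0 and math.isqrt(n) ** 2 == n

for n in [0, 1, 2, 16, 26, 10**8, 10**8 + 1]:
    assert is_perfect_square_isqrt(n) == perfect_square_binary_search(n)
```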
from __future__ import annotations
import math
def ucal(u: float, p: int) -> float:
    """Return u * (u - 1) * ... * (u - p + 1) for the Newton forward formula."""
    temp = u
    for i in range(1, p):
        temp = temp * (u - i)
    return temp


def main() -> None:
    """simple docstring"""
    n = int(input("enter the numbers of values: "))
    y: list[list[float]] = []
    for _ in range(n):
        y.append([])
    for i in range(n):
        for j in range(n):
            y[i].append(j)
            y[i][j] = 0
    print("enter the values of parameters in a list: ")
    x = list(map(float, input().split()))
    print("enter the values of corresponding parameters: ")
    for i in range(n):
        y[i][0] = float(input())
    value = int(input("enter the value to interpolate: "))
    u = (value - x[0]) / (x[1] - x[0])
    # for calculating forward difference table
    for i in range(1, n):
        for j in range(n - i):
            y[j][i] = y[j + 1][i - 1] - y[j][i - 1]
    summ = y[0][0]
    for i in range(1, n):
        summ += (ucal(u, i) * y[0][i]) / math.factorial(i)
    print(f"the value at {value} is {summ}")
if __name__ == "__main__":
main()
| 148 |
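The interactive program above implements Newton's forward-difference interpolation. A non-interactive sketch of the same scheme on fixed, equally spaced sample points (the data here is illustrative only):

```python
import math

x = [0.0, 1.0, 2.0, 3.0]   # equally spaced nodes
fx = [1.0, 2.0, 4.0, 8.0]  # sample values f(x)
n = len(x)
y = [[0.0] * n for _ in range(n)]
for i in range(n):
    y[i][0] = fx[i]
for i in range(1, n):      # forward-difference table, column by column
    for j in range(n - i):
        y[j][i] = y[j + 1][i - 1] - y[j][i - 1]

value = 1.5
u = (value - x[0]) / (x[1] - x[0])
summ = y[0][0]
term = 1.0
for i in range(1, n):
    term *= u - (i - 1)    # u, u(u-1), u(u-1)(u-2), ...
    summ += term * y[0][i] / math.factorial(i)
print(f"the value at {value} is {summ}")  # about 2.8125 for these samples
```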
# Usage:
# ./gen-card-facebook-wmt19.py
import os
from pathlib import Path
def write_model_card(model_card_dir: str, src_lang: str, tgt_lang: str) -> None:
"""simple docstring"""
    texts = {
"en": "Machine learning is great, isn't it?",
"ru": "Машинное обучение - это здорово, не так ли?",
"de": "Maschinelles Lernen ist großartig, oder?",
}
    # BLEU scores as follows:
# "pair": [fairseq, transformers]
    scores = {
"ru-en": ["[41.3](http://matrix.statmt.org/matrix/output/1907?run_id=6937)", "39.20"],
"en-ru": ["[36.4](http://matrix.statmt.org/matrix/output/1914?run_id=6724)", "33.47"],
"en-de": ["[43.1](http://matrix.statmt.org/matrix/output/1909?run_id=6862)", "42.83"],
"de-en": ["[42.3](http://matrix.statmt.org/matrix/output/1902?run_id=6750)", "41.35"],
}
    pair = f'''{src_lang}-{tgt_lang}'''
    readme = f'''
---
language:
- {src_lang}
- {tgt_lang}
thumbnail:
tags:
- translation
- wmt19
- facebook
license: apache-2.0
datasets:
- wmt19
metrics:
- bleu
---
# FSMT
## Model description
This is a ported version of [fairseq wmt19 transformer](https://github.com/pytorch/fairseq/blob/master/examples/wmt19/README.md) for {src_lang}-{tgt_lang}.
For more details, please see, [Facebook FAIR\'s WMT19 News Translation Task Submission](https://arxiv.org/abs/1907.06616).
The abbreviation FSMT stands for FairSeqMachineTranslation
All four models are available:
* [wmt19-en-ru](https://huggingface.co/facebook/wmt19-en-ru)
* [wmt19-ru-en](https://huggingface.co/facebook/wmt19-ru-en)
* [wmt19-en-de](https://huggingface.co/facebook/wmt19-en-de)
* [wmt19-de-en](https://huggingface.co/facebook/wmt19-de-en)
## Intended uses & limitations
#### How to use
```python
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
mname = "facebook/wmt19-{src_lang}-{tgt_lang}"
tokenizer = FSMTTokenizer.from_pretrained(mname)
model = FSMTForConditionalGeneration.from_pretrained(mname)
input = "{texts[src_lang]}"
input_ids = tokenizer.encode(input, return_tensors="pt")
outputs = model.generate(input_ids)
decoded = tokenizer.decode(outputs[0], skip_special_tokens=True)
print(decoded) # {texts[tgt_lang]}
```
#### Limitations and bias
- The original (and this ported model) doesn\'t seem to handle well inputs with repeated sub-phrases, [content gets truncated](https://discuss.huggingface.co/t/issues-with-translating-inputs-containing-repeated-phrases/981)
## Training data
Pretrained weights were left identical to the original model released by fairseq. For more details, please, see the [paper](https://arxiv.org/abs/1907.06616).
## Eval results
pair | fairseq | transformers
-------|---------|----------
{pair} | {scores[pair][0]} | {scores[pair][1]}
The score is slightly below the score reported by `fairseq`, since `transformers` currently doesn\'t support:
- model ensemble, therefore the best performing checkpoint was ported (``model4.pt``).
- re-ranking
The score was calculated using this code:
```bash
git clone https://github.com/huggingface/transformers
cd transformers
export PAIR={pair}
export DATA_DIR=data/$PAIR
export SAVE_DIR=data/$PAIR
export BS=8
export NUM_BEAMS=15
mkdir -p $DATA_DIR
sacrebleu -t wmt19 -l $PAIR --echo src > $DATA_DIR/val.source
sacrebleu -t wmt19 -l $PAIR --echo ref > $DATA_DIR/val.target
echo $PAIR
PYTHONPATH="src:examples/seq2seq" python examples/seq2seq/run_eval.py facebook/wmt19-$PAIR $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS
```
note: fairseq reports using a beam of 50, so you should get a slightly higher score if re-run with `--num_beams 50`.
## Data Sources
- [training, etc.](http://www.statmt.org/wmt19/)
- [test set](http://matrix.statmt.org/test_sets/newstest2019.tgz?1556572561)
### BibTeX entry and citation info
```bibtex
@inproceedings{{...,
year={{2020}},
title={{Facebook FAIR\'s WMT19 News Translation Task Submission}},
author={{Ng, Nathan and Yee, Kyra and Baevski, Alexei and Ott, Myle and Auli, Michael and Edunov, Sergey}},
booktitle={{Proc. of WMT}},
}}
```
## TODO
- port model ensemble (fairseq uses 4 model checkpoints)
'''
    os.makedirs(model_card_dir, exist_ok=True)
    path = os.path.join(model_card_dir, "README.md")
    print(f'''Generating {path}''')
    with open(path, "w", encoding="utf-8") as f:
        f.write(readme)
# make sure we are under the root of the project
repo_dir = Path(__file__).resolve().parent.parent.parent
model_cards_dir = repo_dir / """model_cards"""
for model_name in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]:
    prefix, src_lang, tgt_lang = model_name.split("""-""")
    model_card_dir = model_cards_dir / """facebook""" / model_name
    write_model_card(model_card_dir, src_lang=src_lang, tgt_lang=tgt_lang)
| 148 | 1 |
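The card generator above follows one pattern: fill an f-string template per language pair, then write it out as `README.md`. A stripped-down sketch; the output path and card body below are placeholders, not the real template:

```python
import os

def write_card(card_dir: str, src: str, tgt: str) -> None:
    text = f"# FSMT {src}-{tgt}\n\n(card body goes here)\n"
    os.makedirs(card_dir, exist_ok=True)
    path = os.path.join(card_dir, "README.md")
    with open(path, "w", encoding="utf-8") as f:
        f.write(text)

for pair in ["en-de", "de-en"]:
    src, tgt = pair.split("-")
    write_card(f"/tmp/model_cards/facebook/wmt19-{pair}", src, tgt)
```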
"""simple docstring"""
import inspect
import unittest
from transformers import ViTConfig
from transformers.testing_utils import (
require_accelerate,
require_torch,
require_torch_gpu,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTForImageClassification, ViTForMaskedImageModeling, ViTModel
from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class UpperCamelCase :
def __init__( self, lowerCAmelCase__, lowerCAmelCase__=13, lowerCAmelCase__=30, lowerCAmelCase__=2, lowerCAmelCase__=3, lowerCAmelCase__=True, lowerCAmelCase__=True, lowerCAmelCase__=32, lowerCAmelCase__=5, lowerCAmelCase__=4, lowerCAmelCase__=37, lowerCAmelCase__="gelu", lowerCAmelCase__=0.1, lowerCAmelCase__=0.1, lowerCAmelCase__=10, lowerCAmelCase__=0.02, lowerCAmelCase__=None, lowerCAmelCase__=2, ) -> Any:
snake_case_ = parent
snake_case_ = batch_size
snake_case_ = image_size
snake_case_ = patch_size
snake_case_ = num_channels
snake_case_ = is_training
snake_case_ = use_labels
snake_case_ = hidden_size
snake_case_ = num_hidden_layers
snake_case_ = num_attention_heads
snake_case_ = intermediate_size
snake_case_ = hidden_act
snake_case_ = hidden_dropout_prob
snake_case_ = attention_probs_dropout_prob
snake_case_ = type_sequence_label_size
snake_case_ = initializer_range
snake_case_ = scope
snake_case_ = encoder_stride
# in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
snake_case_ = (image_size // patch_size) ** 2
snake_case_ = num_patches + 1
def a_ ( self) -> Any:
snake_case_ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
snake_case_ = None
if self.use_labels:
snake_case_ = ids_tensor([self.batch_size], self.type_sequence_label_size)
snake_case_ = self.get_config()
return config, pixel_values, labels
def a_ ( self) -> Union[str, Any]:
return ViTConfig(
image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=lowerCAmelCase__, initializer_range=self.initializer_range, encoder_stride=self.encoder_stride, )
def a_ ( self, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__) -> Union[str, Any]:
snake_case_ = ViTModel(config=lowerCAmelCase__)
model.to(lowerCAmelCase__)
model.eval()
snake_case_ = model(lowerCAmelCase__)
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
def a_ ( self, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__) -> str:
snake_case_ = ViTForMaskedImageModeling(config=lowerCAmelCase__)
model.to(lowerCAmelCase__)
model.eval()
snake_case_ = model(lowerCAmelCase__)
self.parent.assertEqual(
result.reconstruction.shape, (self.batch_size, self.num_channels, self.image_size, self.image_size))
# test greyscale images
snake_case_ = 1
snake_case_ = ViTForMaskedImageModeling(lowerCAmelCase__)
model.to(lowerCAmelCase__)
model.eval()
snake_case_ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
snake_case_ = model(lowerCAmelCase__)
self.parent.assertEqual(result.reconstruction.shape, (self.batch_size, 1, self.image_size, self.image_size))
def a_ ( self, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__) -> Optional[int]:
snake_case_ = self.type_sequence_label_size
snake_case_ = ViTForImageClassification(lowerCAmelCase__)
model.to(lowerCAmelCase__)
model.eval()
snake_case_ = model(lowerCAmelCase__, labels=lowerCAmelCase__)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))
# test greyscale images
snake_case_ = 1
snake_case_ = ViTForImageClassification(lowerCAmelCase__)
model.to(lowerCAmelCase__)
model.eval()
snake_case_ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
snake_case_ = model(lowerCAmelCase__)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))
def a_ ( self) -> Any:
snake_case_ = self.prepare_config_and_inputs()
(
(
snake_case_
) , (
snake_case_
) , (
snake_case_
) ,
) = config_and_inputs
snake_case_ = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class UpperCamelCase ( lowerCAmelCase__ , lowerCAmelCase__ , unittest.TestCase ):
SCREAMING_SNAKE_CASE_ = (
(
ViTModel,
ViTForImageClassification,
ViTForMaskedImageModeling,
)
if is_torch_available()
else ()
)
SCREAMING_SNAKE_CASE_ = (
{"feature-extraction": ViTModel, "image-classification": ViTForImageClassification}
if is_torch_available()
else {}
)
SCREAMING_SNAKE_CASE_ = True
SCREAMING_SNAKE_CASE_ = False
SCREAMING_SNAKE_CASE_ = False
SCREAMING_SNAKE_CASE_ = False
def a_ ( self) -> Optional[int]:
snake_case_ = ViTModelTester(self)
snake_case_ = ConfigTester(self, config_class=lowerCAmelCase__, has_text_modality=lowerCAmelCase__, hidden_size=37)
def a_ ( self) -> str:
self.config_tester.run_common_tests()
@unittest.skip(reason='ViT does not use inputs_embeds')
def a_ ( self) -> List[str]:
pass
def a_ ( self) -> Optional[Any]:
snake_case_ , snake_case_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case_ = model_class(lowerCAmelCase__)
self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
snake_case_ = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(lowerCAmelCase__, nn.Linear))
def a_ ( self) -> List[str]:
snake_case_ , snake_case_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case_ = model_class(lowerCAmelCase__)
snake_case_ = inspect.signature(model.forward)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
snake_case_ = [*signature.parameters.keys()]
snake_case_ = ['pixel_values']
self.assertListEqual(arg_names[:1], lowerCAmelCase__)
def a_ ( self) -> Dict:
snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCAmelCase__)
def a_ ( self) -> int:
snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*lowerCAmelCase__)
def a_ ( self) -> Any:
snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowerCAmelCase__)
@slow
def a_ ( self) -> str:
for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
snake_case_ = ViTModel.from_pretrained(lowerCAmelCase__)
self.assertIsNotNone(lowerCAmelCase__)
def prepare_img():
snake_case_ = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
@require_vision
class UpperCamelCase ( unittest.TestCase ):
@cached_property
def a_ ( self) -> Dict:
return ViTImageProcessor.from_pretrained('google/vit-base-patch16-224') if is_vision_available() else None
@slow
def a_ ( self) -> List[Any]:
snake_case_ = ViTForImageClassification.from_pretrained('google/vit-base-patch16-224').to(lowerCAmelCase__)
snake_case_ = self.default_image_processor
snake_case_ = prepare_img()
snake_case_ = image_processor(images=lowerCAmelCase__, return_tensors='pt').to(lowerCAmelCase__)
# forward pass
with torch.no_grad():
snake_case_ = model(**lowerCAmelCase__)
# verify the logits
snake_case_ = torch.Size((1, 1000))
self.assertEqual(outputs.logits.shape, lowerCAmelCase__)
snake_case_ = torch.tensor([-0.2744, 0.8215, -0.0836]).to(lowerCAmelCase__)
self.assertTrue(torch.allclose(outputs.logits[0, :3], lowerCAmelCase__, atol=1e-4))
@slow
def a_ ( self) -> List[Any]:
# ViT models have an `interpolate_pos_encoding` argument in their forward method,
# allowing to interpolate the pre-trained position embeddings in order to use
# the model on higher resolutions. The DINO model by Facebook AI leverages this
# to visualize self-attention on higher resolution images.
snake_case_ = ViTModel.from_pretrained('facebook/dino-vits8').to(lowerCAmelCase__)
snake_case_ = ViTImageProcessor.from_pretrained('facebook/dino-vits8', size=480)
snake_case_ = prepare_img()
snake_case_ = image_processor(images=lowerCAmelCase__, return_tensors='pt')
snake_case_ = inputs.pixel_values.to(lowerCAmelCase__)
# forward pass
with torch.no_grad():
snake_case_ = model(lowerCAmelCase__, interpolate_pos_encoding=lowerCAmelCase__)
# verify the logits
snake_case_ = torch.Size((1, 3601, 384))
self.assertEqual(outputs.last_hidden_state.shape, lowerCAmelCase__)
snake_case_ = torch.tensor(
[[4.2340, 4.3906, -6.6692], [4.5463, 1.8928, -6.7257], [4.4429, 0.8496, -5.8585]]).to(lowerCAmelCase__)
self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3], lowerCAmelCase__, atol=1e-4))
@slow
@require_accelerate
@require_torch_gpu
def a_ ( self) -> Union[str, Any]:
snake_case_ = ViTModel.from_pretrained('facebook/dino-vits8', torch_dtype=torch.floataa, device_map='auto')
snake_case_ = self.default_image_processor
snake_case_ = prepare_img()
snake_case_ = image_processor(images=lowerCAmelCase__, return_tensors='pt')
snake_case_ = inputs.pixel_values.to(lowerCAmelCase__)
# forward pass to make sure inference works in fp16
with torch.no_grad():
snake_case_ = model(lowerCAmelCase__)
| 69 |
"""simple docstring"""
def gcd(a: int, b: int) -> int:
    while a != 0:
        a, b = b % a, a
    return b


def find_mod_inverse(a: int, m: int) -> int:
    if gcd(a, m) != 1:
        msg = f'mod inverse of {a!r} and {m!r} does not exist'
        raise ValueError(msg)
    u1, u2, u3 = 1, 0, a
    v1, v2, v3 = 0, 1, m
    while v3 != 0:
        q = u3 // v3
        u1, u2, u3, v1, v2, v3 = (u1 - q * v1), (u2 - q * v2), (u3 - q * v3), v1, v2, v3
    return u1 % m
| 69 | 1 |
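A quick usage check for the extended-Euclid helpers above, using the names restored from the call sites:

```python
assert gcd(12, 18) == 6
inv = find_mod_inverse(7, 26)
assert (7 * inv) % 26 == 1  # inv == 15
```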
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class _snake_case ( _lowercase ):
lowerCamelCase__: List[Any] = ["image_processor", "tokenizer"]
lowerCamelCase__: List[str] = "AutoImageProcessor"
lowerCamelCase__: List[Any] = "AutoTokenizer"
def __init__( self: int , __lowerCamelCase: List[Any] , __lowerCamelCase: int ) -> Optional[Any]:
super().__init__(__lowerCamelCase , __lowerCamelCase )
__UpperCAmelCase : Optional[Any] = self.image_processor
def __call__( self: List[str] , __lowerCamelCase: Optional[int]=None , __lowerCamelCase: Union[str, Any]=None , __lowerCamelCase: Union[str, Any]=None , **__lowerCamelCase: Optional[int] ) -> Union[str, Any]:
if text is None and images is None:
raise ValueError("You have to specify either text or images. Both cannot be none." )
if text is not None:
__UpperCAmelCase : Optional[int] = self.tokenizer(__lowerCamelCase , return_tensors=__lowerCamelCase , **__lowerCamelCase )
if images is not None:
__UpperCAmelCase : str = self.image_processor(__lowerCamelCase , return_tensors=__lowerCamelCase , **__lowerCamelCase )
if text is not None and images is not None:
__UpperCAmelCase : Tuple = image_features.pixel_values
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**__lowerCamelCase ) , tensor_type=__lowerCamelCase )
def _lowerCamelCase ( self: Any , *__lowerCamelCase: str , **__lowerCamelCase: List[Any] ) -> Optional[Any]:
return self.tokenizer.batch_decode(*__lowerCamelCase , **__lowerCamelCase )
def _lowerCamelCase ( self: Optional[int] , *__lowerCamelCase: Optional[Any] , **__lowerCamelCase: List[str] ) -> Optional[int]:
return self.tokenizer.decode(*__lowerCamelCase , **__lowerCamelCase )
@property
def _lowerCamelCase ( self: str ) -> int:
return ["input_ids", "attention_mask", "pixel_values"]
| 342 |
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch
import numpy as np
import pytest
from datasets.arrow_dataset import Dataset
from datasets.search import ElasticSearchIndex, FaissIndex, MissingIndex
from .utils import require_elasticsearch, require_faiss
_snake_case = pytest.mark.integration
@require_faiss
class _snake_case ( _lowercase ):
def _lowerCamelCase ( self: Union[str, Any] ) -> str:
__UpperCAmelCase : Optional[int] = Dataset.from_dict({"filename": ["my_name-train" + "_" + str(x ) for x in np.arange(30 ).tolist()]} )
return dset
def _lowerCamelCase ( self: Optional[Any] ) -> Tuple:
import faiss
__UpperCAmelCase : Dataset = self._create_dummy_dataset()
__UpperCAmelCase : int = dset.map(
lambda ex , i : {"vecs": i * np.ones(5 , dtype=np.floataa )} , with_indices=__lowerCamelCase , keep_in_memory=__lowerCamelCase )
__UpperCAmelCase : Tuple = dset.add_faiss_index("vecs" , batch_size=1_00 , metric_type=faiss.METRIC_INNER_PRODUCT )
__UpperCAmelCase , __UpperCAmelCase : Dict = dset.get_nearest_examples("vecs" , np.ones(5 , dtype=np.floataa ) )
self.assertEqual(examples["filename"][0] , "my_name-train_29" )
dset.drop_index("vecs" )
def _lowerCamelCase ( self: List[str] ) -> int:
import faiss
__UpperCAmelCase : Dataset = self._create_dummy_dataset()
dset.add_faiss_index_from_external_arrays(
external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name="vecs" , batch_size=1_00 , metric_type=faiss.METRIC_INNER_PRODUCT , )
__UpperCAmelCase , __UpperCAmelCase : Tuple = dset.get_nearest_examples("vecs" , np.ones(5 , dtype=np.floataa ) )
self.assertEqual(examples["filename"][0] , "my_name-train_29" )
def _lowerCamelCase ( self: Optional[int] ) -> Dict:
import faiss
__UpperCAmelCase : Dataset = self._create_dummy_dataset()
dset.add_faiss_index_from_external_arrays(
external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name="vecs" , metric_type=faiss.METRIC_INNER_PRODUCT , )
# Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
# ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
# see https://bugs.python.org/issue14243 and
# https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
with tempfile.NamedTemporaryFile(delete=__lowerCamelCase ) as tmp_file:
dset.save_faiss_index("vecs" , tmp_file.name )
dset.load_faiss_index("vecs2" , tmp_file.name )
os.unlink(tmp_file.name )
__UpperCAmelCase , __UpperCAmelCase : List[Any] = dset.get_nearest_examples("vecs2" , np.ones(5 , dtype=np.floataa ) )
self.assertEqual(examples["filename"][0] , "my_name-train_29" )
def _lowerCamelCase ( self: List[Any] ) -> List[Any]:
__UpperCAmelCase : Dataset = self._create_dummy_dataset()
dset.add_faiss_index_from_external_arrays(
external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name="vecs" )
dset.drop_index("vecs" )
self.assertRaises(__lowerCamelCase , partial(dset.get_nearest_examples , "vecs2" , np.ones(5 , dtype=np.floataa ) ) )
def _lowerCamelCase ( self: List[str] ) -> Dict:
from elasticsearch import Elasticsearch
__UpperCAmelCase : Dataset = self._create_dummy_dataset()
with patch("elasticsearch.Elasticsearch.search" ) as mocked_search, patch(
"elasticsearch.client.IndicesClient.create" ) as mocked_index_create, patch("elasticsearch.helpers.streaming_bulk" ) as mocked_bulk:
__UpperCAmelCase : int = {"acknowledged": True}
mocked_bulk.return_value([(True, None)] * 30 )
__UpperCAmelCase : Dict = {"hits": {"hits": [{"_score": 1, "_id": 29}]}}
__UpperCAmelCase : Any = Elasticsearch()
dset.add_elasticsearch_index("filename" , es_client=__lowerCamelCase )
__UpperCAmelCase , __UpperCAmelCase : Optional[int] = dset.get_nearest_examples("filename" , "my_name-train_29" )
self.assertEqual(examples["filename"][0] , "my_name-train_29" )
@require_faiss
class _snake_case ( _lowercase ):
def _lowerCamelCase ( self: List[str] ) -> Optional[int]:
import faiss
__UpperCAmelCase : int = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT )
# add vectors
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
self.assertIsNotNone(index.faiss_index )
self.assertEqual(index.faiss_index.ntotal , 5 )
index.add_vectors(np.zeros((5, 5) , dtype=np.floataa ) )
self.assertEqual(index.faiss_index.ntotal , 10 )
# single query
__UpperCAmelCase : Dict = np.zeros(5 , dtype=np.floataa )
__UpperCAmelCase : List[str] = 1
__UpperCAmelCase , __UpperCAmelCase : List[str] = index.search(__lowerCamelCase )
self.assertRaises(__lowerCamelCase , index.search , query.reshape(-1 , 1 ) )
self.assertGreater(scores[0] , 0 )
self.assertEqual(indices[0] , 1 )
# batched queries
__UpperCAmelCase : List[str] = np.eye(5 , dtype=np.floataa )[::-1]
__UpperCAmelCase , __UpperCAmelCase : Any = index.search_batch(__lowerCamelCase )
self.assertRaises(__lowerCamelCase , index.search_batch , queries[0] )
__UpperCAmelCase : Dict = [scores[0] for scores in total_scores]
__UpperCAmelCase : int = [indices[0] for indices in total_indices]
self.assertGreater(np.min(__lowerCamelCase ) , 0 )
self.assertListEqual([4, 3, 2, 1, 0] , __lowerCamelCase )
def _lowerCamelCase ( self: Any ) -> List[str]:
import faiss
__UpperCAmelCase : Dict = FaissIndex(string_factory="Flat" )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
self.assertIsInstance(index.faiss_index , faiss.IndexFlat )
__UpperCAmelCase : Optional[Any] = FaissIndex(string_factory="LSH" )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
self.assertIsInstance(index.faiss_index , faiss.IndexLSH )
with self.assertRaises(__lowerCamelCase ):
__UpperCAmelCase : Any = FaissIndex(string_factory="Flat" , custom_index=faiss.IndexFlat(5 ) )
def _lowerCamelCase ( self: List[str] ) -> Dict:
import faiss
__UpperCAmelCase : str = faiss.IndexFlat(5 )
__UpperCAmelCase : int = FaissIndex(custom_index=__lowerCamelCase )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
self.assertIsInstance(index.faiss_index , faiss.IndexFlat )
def _lowerCamelCase ( self: Union[str, Any] ) -> int:
import faiss
__UpperCAmelCase : Any = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
# Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
# ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
# see https://bugs.python.org/issue14243 and
# https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
with tempfile.NamedTemporaryFile(delete=__lowerCamelCase ) as tmp_file:
index.save(tmp_file.name )
__UpperCAmelCase : List[str] = FaissIndex.load(tmp_file.name )
os.unlink(tmp_file.name )
__UpperCAmelCase : Tuple = np.zeros(5 , dtype=np.floataa )
__UpperCAmelCase : Tuple = 1
__UpperCAmelCase , __UpperCAmelCase : List[Any] = index.search(__lowerCamelCase )
self.assertGreater(scores[0] , 0 )
self.assertEqual(indices[0] , 1 )
@require_faiss
def _UpperCamelCase ( snake_case__ ) -> Optional[Any]:
import faiss
__UpperCAmelCase : Optional[Any] = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT )
index.add_vectors(np.eye(5, dtype=np.floataa ) )
__UpperCAmelCase : Optional[Any] = "index.faiss"
__UpperCAmelCase : Optional[int] = f'''mock://{index_name}'''
index.save(snake_case__, storage_options=mockfs.storage_options )
__UpperCAmelCase : Dict = FaissIndex.load(snake_case__, storage_options=mockfs.storage_options )
__UpperCAmelCase : str = np.zeros(5, dtype=np.floataa )
__UpperCAmelCase : Any = 1
__UpperCAmelCase , __UpperCAmelCase : List[str] = index.search(snake_case__ )
assert scores[0] > 0
assert indices[0] == 1
@require_elasticsearch
class _snake_case ( _lowercase ):
def _lowerCamelCase ( self: str ) -> Union[str, Any]:
from elasticsearch import Elasticsearch
with patch("elasticsearch.Elasticsearch.search" ) as mocked_search, patch(
"elasticsearch.client.IndicesClient.create" ) as mocked_index_create, patch("elasticsearch.helpers.streaming_bulk" ) as mocked_bulk:
__UpperCAmelCase : Optional[Any] = Elasticsearch()
__UpperCAmelCase : Dict = {"acknowledged": True}
__UpperCAmelCase : Any = ElasticSearchIndex(es_client=__lowerCamelCase )
mocked_bulk.return_value([(True, None)] * 3 )
index.add_documents(["foo", "bar", "foobar"] )
# single query
__UpperCAmelCase : Dict = "foo"
__UpperCAmelCase : Optional[Any] = {"hits": {"hits": [{"_score": 1, "_id": 0}]}}
__UpperCAmelCase , __UpperCAmelCase : Optional[int] = index.search(__lowerCamelCase )
self.assertEqual(scores[0] , 1 )
self.assertEqual(indices[0] , 0 )
# single query with timeout
__UpperCAmelCase : int = "foo"
__UpperCAmelCase : Optional[Any] = {"hits": {"hits": [{"_score": 1, "_id": 0}]}}
__UpperCAmelCase , __UpperCAmelCase : Union[str, Any] = index.search(__lowerCamelCase , request_timeout=30 )
self.assertEqual(scores[0] , 1 )
self.assertEqual(indices[0] , 0 )
# batched queries
__UpperCAmelCase : int = ["foo", "bar", "foobar"]
__UpperCAmelCase : Union[str, Any] = {"hits": {"hits": [{"_score": 1, "_id": 1}]}}
__UpperCAmelCase , __UpperCAmelCase : List[Any] = index.search_batch(__lowerCamelCase )
__UpperCAmelCase : Tuple = [scores[0] for scores in total_scores]
__UpperCAmelCase : Optional[int] = [indices[0] for indices in total_indices]
self.assertGreater(np.min(__lowerCamelCase ) , 0 )
self.assertListEqual([1, 1, 1] , __lowerCamelCase )
# batched queries with timeout
__UpperCAmelCase : str = ["foo", "bar", "foobar"]
__UpperCAmelCase : Tuple = {"hits": {"hits": [{"_score": 1, "_id": 1}]}}
__UpperCAmelCase , __UpperCAmelCase : Union[str, Any] = index.search_batch(__lowerCamelCase , request_timeout=30 )
__UpperCAmelCase : Union[str, Any] = [scores[0] for scores in total_scores]
__UpperCAmelCase : List[Any] = [indices[0] for indices in total_indices]
self.assertGreater(np.min(__lowerCamelCase ) , 0 )
self.assertListEqual([1, 1, 1] , __lowerCamelCase )
| 342 | 1 |
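Outside the mocks, the FAISS round trip in these tests is only a few calls. A minimal sketch assuming `faiss` and `numpy` are installed, using the same inner-product metric:

```python
import faiss
import numpy as np

index = faiss.IndexFlatIP(5)             # inner-product metric, dimension 5
index.add(np.eye(5, dtype=np.float32))   # five one-hot vectors
query = np.zeros((1, 5), dtype=np.float32)
query[0, 1] = 1.0
scores, indices = index.search(query, 1)
assert indices[0, 0] == 1 and scores[0, 0] > 0
```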
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase__ = logging.get_logger(__name__)
UpperCAmelCase__ = {
'''alibaba-damo/mgp-str-base''': '''https://huggingface.co/alibaba-damo/mgp-str-base/resolve/main/config.json''',
}
class lowerCamelCase__ ( lowerCAmelCase):
SCREAMING_SNAKE_CASE__ = '''mgp-str'''
def __init__(self , UpperCAmelCase=[3_2, 1_2_8] , UpperCAmelCase=4 , UpperCAmelCase=3 , UpperCAmelCase=2_7 , UpperCAmelCase=3_8 , UpperCAmelCase=5_0_2_5_7 , UpperCAmelCase=3_0_5_2_2 , UpperCAmelCase=7_6_8 , UpperCAmelCase=1_2 , UpperCAmelCase=1_2 , UpperCAmelCase=4.0 , UpperCAmelCase=True , UpperCAmelCase=False , UpperCAmelCase=1e-5 , UpperCAmelCase=0.0 , UpperCAmelCase=0.0 , UpperCAmelCase=0.0 , UpperCAmelCase=False , UpperCAmelCase=0.02 , **UpperCAmelCase , ) -> Optional[Any]:
super().__init__(**UpperCAmelCase )
_lowercase =image_size
_lowercase =patch_size
_lowercase =num_channels
_lowercase =max_token_length
_lowercase =num_character_labels
_lowercase =num_bpe_labels
_lowercase =num_wordpiece_labels
_lowercase =hidden_size
_lowercase =num_hidden_layers
_lowercase =num_attention_heads
_lowercase =mlp_ratio
_lowercase =distilled
_lowercase =layer_norm_eps
_lowercase =drop_rate
_lowercase =qkv_bias
_lowercase =attn_drop_rate
_lowercase =drop_path_rate
_lowercase =output_aa_attentions
_lowercase =initializer_range
| 5 |
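For context, this configuration mirrors `MgpstrConfig` in `transformers`; the sketch below assumes that class name and the defaults listed in `__init__` above:

```python
from transformers import MgpstrConfig  # assumed real counterpart of the class above

config = MgpstrConfig()       # all defaults
print(config.hidden_size)     # 768 per the defaults above
config = MgpstrConfig(max_token_length=32, drop_rate=0.1)  # override two fields
```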
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
UpperCAmelCase__ = {
'''configuration_timesformer''': ['''TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''TimesformerConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ = [
'''TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TimesformerModel''',
'''TimesformerForVideoClassification''',
'''TimesformerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_timesformer import TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, TimesformerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_timesformer import (
TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TimesformerForVideoClassification,
TimesformerModel,
TimesformerPreTrainedModel,
)
else:
import sys
UpperCAmelCase__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 5 | 1 |
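The `_LazyModule` machinery above defers heavy imports until first attribute access. A plain-Python sketch of the same idea; `LazyModule` here is a hypothetical minimal stand-in, not the transformers implementation:

```python
import importlib

class LazyModule:
    def __init__(self, name: str):
        self._name = name
        self._module = None

    def __getattr__(self, attr: str):
        # only called for attributes not found normally, so the real
        # import happens on first use of the wrapped module
        if self._module is None:
            self._module = importlib.import_module(self._name)
        return getattr(self._module, attr)

json_lazy = LazyModule("json")
print(json_lazy.dumps({"ok": True}))  # triggers the actual import here
```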
import argparse
import json
import gdown
import numpy as np
import torch
from huggingface_hub import hf_hub_download
from transformers import (
VideoMAEConfig,
VideoMAEForPreTraining,
VideoMAEForVideoClassification,
VideoMAEImageProcessor,
)
def __UpperCAmelCase ( __lowerCamelCase ) -> Optional[Any]:
lowercase__ : Optional[int] = VideoMAEConfig()
set_architecture_configs(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
if "finetuned" not in model_name:
lowercase__ : str = False
if "finetuned" in model_name:
lowercase__ : Optional[Any] = '''huggingface/label-files'''
if "kinetics" in model_name:
lowercase__ : Union[str, Any] = 4_00
lowercase__ : List[Any] = '''kinetics400-id2label.json'''
elif "ssv2" in model_name:
lowercase__ : int = 1_74
lowercase__ : Optional[Any] = '''something-something-v2-id2label.json'''
else:
raise ValueError('''Model name should either contain \'kinetics\' or \'ssv2\' in case it\'s fine-tuned.''' )
lowercase__ : Any = json.load(open(hf_hub_download(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , repo_type='''dataset''' ) , '''r''' ) )
lowercase__ : int = {int(SCREAMING_SNAKE_CASE__ ): v for k, v in idalabel.items()}
lowercase__ : Dict = idalabel
lowercase__ : Tuple = {v: k for k, v in idalabel.items()}
return config
def __UpperCAmelCase ( __lowerCamelCase , __lowerCamelCase ) -> Tuple:
if "small" in model_name:
lowercase__ : Optional[int] = 3_84
lowercase__ : Optional[int] = 15_36
lowercase__ : Optional[Any] = 12
lowercase__ : Any = 16
lowercase__ : List[Any] = 12
lowercase__ : Tuple = 3
lowercase__ : List[Any] = 1_92
lowercase__ : List[str] = 7_68
elif "large" in model_name:
lowercase__ : Optional[int] = 10_24
lowercase__ : Dict = 40_96
lowercase__ : Dict = 24
lowercase__ : List[Any] = 16
lowercase__ : Optional[Any] = 12
lowercase__ : List[str] = 8
lowercase__ : Optional[int] = 5_12
lowercase__ : str = 20_48
elif "huge" in model_name:
lowercase__ : Union[str, Any] = 12_80
lowercase__ : int = 51_20
lowercase__ : Tuple = 32
lowercase__ : Union[str, Any] = 16
lowercase__ : Tuple = 12
lowercase__ : Optional[int] = 8
lowercase__ : List[str] = 6_40
lowercase__ : Tuple = 25_60
elif "base" not in model_name:
raise ValueError('''Model name should include either \"small\", \"base\", \"large\", or \"huge\"''' )
def __UpperCAmelCase ( __lowerCamelCase ) -> Dict:
if "encoder." in name:
lowercase__ : str = name.replace('''encoder.''' , '''''' )
if "cls_token" in name:
lowercase__ : List[str] = name.replace('''cls_token''' , '''videomae.embeddings.cls_token''' )
if "decoder_pos_embed" in name:
lowercase__ : List[Any] = name.replace('''decoder_pos_embed''' , '''decoder.decoder_pos_embed''' )
if "pos_embed" in name and "decoder" not in name:
lowercase__ : List[str] = name.replace('''pos_embed''' , '''videomae.embeddings.position_embeddings''' )
if "patch_embed.proj" in name:
lowercase__ : List[str] = name.replace('''patch_embed.proj''' , '''videomae.embeddings.patch_embeddings.projection''' )
if "patch_embed.norm" in name:
lowercase__ : Dict = name.replace('''patch_embed.norm''' , '''videomae.embeddings.norm''' )
if "decoder.blocks" in name:
lowercase__ : List[str] = name.replace('''decoder.blocks''' , '''decoder.decoder_layers''' )
if "blocks" in name:
lowercase__ : Tuple = name.replace('''blocks''' , '''videomae.encoder.layer''' )
if "attn.proj" in name:
lowercase__ : Any = name.replace('''attn.proj''' , '''attention.output.dense''' )
if "attn" in name and "bias" not in name:
lowercase__ : Tuple = name.replace('''attn''' , '''attention.self''' )
if "attn" in name:
lowercase__ : List[Any] = name.replace('''attn''' , '''attention.attention''' )
if "norm1" in name:
lowercase__ : Union[str, Any] = name.replace('''norm1''' , '''layernorm_before''' )
if "norm2" in name:
lowercase__ : int = name.replace('''norm2''' , '''layernorm_after''' )
if "mlp.fc1" in name:
lowercase__ : Optional[int] = name.replace('''mlp.fc1''' , '''intermediate.dense''' )
if "mlp.fc2" in name:
lowercase__ : List[Any] = name.replace('''mlp.fc2''' , '''output.dense''' )
if "decoder_embed" in name:
lowercase__ : Tuple = name.replace('''decoder_embed''' , '''decoder.decoder_embed''' )
if "decoder_norm" in name:
lowercase__ : Union[str, Any] = name.replace('''decoder_norm''' , '''decoder.decoder_norm''' )
if "decoder_pred" in name:
lowercase__ : Tuple = name.replace('''decoder_pred''' , '''decoder.decoder_pred''' )
if "norm.weight" in name and "decoder" not in name and "fc" not in name:
lowercase__ : Any = name.replace('''norm.weight''' , '''videomae.layernorm.weight''' )
if "norm.bias" in name and "decoder" not in name and "fc" not in name:
lowercase__ : Optional[Any] = name.replace('''norm.bias''' , '''videomae.layernorm.bias''' )
if "head" in name and "decoder" not in name:
lowercase__ : Any = name.replace('''head''' , '''classifier''' )
return name
def __UpperCAmelCase ( __lowerCamelCase , __lowerCamelCase ) -> str:
for key in orig_state_dict.copy().keys():
lowercase__ : Any = orig_state_dict.pop(SCREAMING_SNAKE_CASE__ )
if key.startswith('''encoder.''' ):
lowercase__ : Union[str, Any] = key.replace('''encoder.''' , '''''' )
if "qkv" in key:
lowercase__ : str = key.split('''.''' )
if key.startswith('''decoder.blocks''' ):
lowercase__ : Optional[Any] = config.decoder_hidden_size
lowercase__ : List[Any] = int(key_split[2] )
lowercase__ : Any = '''decoder.decoder_layers.'''
if "weight" in key:
lowercase__ : Optional[Any] = val[:dim, :]
lowercase__ : int = val[dim : dim * 2, :]
lowercase__ : Dict = val[-dim:, :]
else:
lowercase__ : Any = config.hidden_size
lowercase__ : Dict = int(key_split[1] )
lowercase__ : int = '''videomae.encoder.layer.'''
if "weight" in key:
lowercase__ : List[Any] = val[:dim, :]
lowercase__ : Any = val[dim : dim * 2, :]
lowercase__ : Any = val[-dim:, :]
else:
lowercase__ : Tuple = val
return orig_state_dict
def prepare_video():
lowercase__ : List[Any] = hf_hub_download(
repo_id='''hf-internal-testing/spaghetti-video''' , filename='''eating_spaghetti.npy''' , repo_type='''dataset''' )
lowercase__ : List[Any] = np.load(SCREAMING_SNAKE_CASE__ )
return list(SCREAMING_SNAKE_CASE__ )
def __UpperCAmelCase ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) -> Optional[Any]:
lowercase__ : str = get_videomae_config(SCREAMING_SNAKE_CASE__ )
if "finetuned" in model_name:
lowercase__ : Union[str, Any] = VideoMAEForVideoClassification(SCREAMING_SNAKE_CASE__ )
else:
lowercase__ : Dict = VideoMAEForPreTraining(SCREAMING_SNAKE_CASE__ )
# download original checkpoint, hosted on Google Drive
lowercase__ : List[str] = '''pytorch_model.bin'''
gdown.cached_download(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , quiet=SCREAMING_SNAKE_CASE__ )
lowercase__ : Optional[int] = torch.load(SCREAMING_SNAKE_CASE__ , map_location='''cpu''' )
if "model" in files:
lowercase__ : str = files['''model''']
else:
lowercase__ : List[Any] = files['''module''']
lowercase__ : str = convert_state_dict(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
model.load_state_dict(SCREAMING_SNAKE_CASE__ )
model.eval()
# verify model on basic input
lowercase__ : Optional[Any] = VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] )
lowercase__ : Any = prepare_video()
lowercase__ : int = image_processor(SCREAMING_SNAKE_CASE__ , return_tensors='''pt''' )
if "finetuned" not in model_name:
lowercase__ : Any = hf_hub_download(repo_id='''hf-internal-testing/bool-masked-pos''' , filename='''bool_masked_pos.pt''' )
lowercase__ : Union[str, Any] = torch.load(SCREAMING_SNAKE_CASE__ )
lowercase__ : Optional[int] = model(**SCREAMING_SNAKE_CASE__ )
lowercase__ : int = outputs.logits
lowercase__ : int = [
'''videomae-small-finetuned-kinetics''',
'''videomae-small-finetuned-ssv2''',
# Kinetics-400 checkpoints (short = pretrained only for 800 epochs instead of 1600)
'''videomae-base-short''',
'''videomae-base-short-finetuned-kinetics''',
'''videomae-base''',
'''videomae-base-finetuned-kinetics''',
'''videomae-large''',
'''videomae-large-finetuned-kinetics''',
'''videomae-huge-finetuned-kinetics''',
# Something-Something-v2 checkpoints (short = pretrained only for 800 epochs instead of 2400)
'''videomae-base-short-ssv2''',
'''videomae-base-short-finetuned-ssv2''',
'''videomae-base-ssv2''',
'''videomae-base-finetuned-ssv2''',
]
# NOTE: logits were tested with image_mean and image_std equal to [0.5, 0.5, 0.5] and [0.5, 0.5, 0.5]
    if model_name == "videomae-small-finetuned-kinetics":
        expected_shape = torch.Size([1, 400])
        expected_slice = torch.tensor([-0.9291, -0.4061, -0.9307])
    elif model_name == "videomae-small-finetuned-ssv2":
        expected_shape = torch.Size([1, 174])
        expected_slice = torch.tensor([0.2671, -0.4689, -0.8235])
    elif model_name == "videomae-base":
        expected_shape = torch.Size([1, 1408, 1536])
        expected_slice = torch.tensor([[0.7739, 0.7968, 0.7089], [0.6701, 0.7487, 0.6209], [0.4287, 0.5158, 0.4773]])
    elif model_name == "videomae-base-short":
        expected_shape = torch.Size([1, 1408, 1536])
        expected_slice = torch.tensor([[0.7994, 0.9612, 0.8508], [0.7401, 0.8958, 0.8302], [0.5862, 0.7468, 0.7325]])
        # we verified the loss both for normalized and unnormalized targets for this one
        expected_loss = torch.tensor([0.5142]) if config.norm_pix_loss else torch.tensor([0.6469])
    elif model_name == "videomae-large":
        expected_shape = torch.Size([1, 1408, 1536])
        expected_slice = torch.tensor([[0.7149, 0.7997, 0.6966], [0.6768, 0.7869, 0.6948], [0.5139, 0.6221, 0.5605]])
    elif model_name == "videomae-large-finetuned-kinetics":
        expected_shape = torch.Size([1, 400])
        expected_slice = torch.tensor([0.0771, 0.0011, -0.3625])
    elif model_name == "videomae-huge-finetuned-kinetics":
        expected_shape = torch.Size([1, 400])
        expected_slice = torch.tensor([0.2433, 0.1632, -0.4894])
    elif model_name == "videomae-base-short-finetuned-kinetics":
        expected_shape = torch.Size([1, 400])
        expected_slice = torch.tensor([0.6588, 0.0990, -0.2493])
    elif model_name == "videomae-base-finetuned-kinetics":
        expected_shape = torch.Size([1, 400])
        expected_slice = torch.tensor([0.3669, -0.0688, -0.2421])
    elif model_name == "videomae-base-short-ssv2":
        expected_shape = torch.Size([1, 1408, 1536])
        expected_slice = torch.tensor([[0.4712, 0.5296, 0.5786], [0.2278, 0.2729, 0.4026], [0.0352, 0.0730, 0.2506]])
    elif model_name == "videomae-base-short-finetuned-ssv2":
        expected_shape = torch.Size([1, 174])
        expected_slice = torch.tensor([-0.0537, -0.1539, -0.3266])
    elif model_name == "videomae-base-ssv2":
        expected_shape = torch.Size([1, 1408, 1536])
        expected_slice = torch.tensor([[0.8131, 0.8727, 0.8546], [0.7366, 0.9377, 0.8870], [0.5935, 0.8874, 0.8564]])
    elif model_name == "videomae-base-finetuned-ssv2":
        expected_shape = torch.Size([1, 174])
        expected_slice = torch.tensor([0.1961, -0.8337, -0.6389])
else:
raise ValueError(f"""Model name not supported. Should be one of {model_names}""" )
# verify logits
assert logits.shape == expected_shape
if "finetuned" in model_name:
        assert torch.allclose(logits[0, :3], expected_slice, atol=1e-4)
else:
print('''Logits:''' , logits[0, :3, :3] )
        assert torch.allclose(logits[0, :3, :3], expected_slice, atol=1e-4)
print('''Logits ok!''' )
# verify loss, if applicable
if model_name == "videomae-base-short":
        loss = outputs.loss
        assert torch.allclose(loss, expected_loss, atol=1e-4)
print('''Loss ok!''' )
if pytorch_dump_folder_path is not None:
print(f"""Saving model and image processor to {pytorch_dump_folder_path}""" )
        image_processor.save_pretrained(pytorch_dump_folder_path)
        model.save_pretrained(pytorch_dump_folder_path)
if push_to_hub:
print('''Pushing to the hub...''' )
        model.push_to_hub(model_name, organization="nielsr")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--checkpoint_url',
default='https://drive.google.com/u/1/uc?id=1tEhLyskjb755TJ65ptsrafUG2llSwQE1&export=download&confirm=t&uuid=aa3276eb-fb7e-482a-adec-dc7171df14c4',
type=str,
help=(
'URL of the original PyTorch checkpoint (on Google Drive) you\'d like to convert. Should be a direct'
' download link.'
),
)
parser.add_argument(
'--pytorch_dump_folder_path',
default='/Users/nielsrogge/Documents/VideoMAE/Test',
type=str,
help='Path to the output PyTorch model directory.',
)
parser.add_argument('--model_name', default='videomae-base', type=str, help='Name of the model.')
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
    args = parser.parse_args()
convert_videomae_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub)
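# A minimal, standalone sketch of the verification pattern the script uses above: compare
# the output shape and a small slice of the logits against hard-coded expectations. The
# tensors below are illustrative stand-ins; real expected values come from the checkpoint.
#
#     import torch
#
#     expected_shape = torch.Size([1, 400])
#     expected_slice = torch.tensor([0.3669, -0.0688, -0.2421])
#
#     logits = torch.zeros(expected_shape)
#     logits[0, :3] = expected_slice  # pretend this came from the converted model
#
#     assert logits.shape == expected_shape
#     assert torch.allclose(logits[0, :3], expected_slice, atol=1e-4)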
"""simple docstring"""
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNet2DModel
from ...schedulers import ScoreSdeVeScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class ScoreSdeVePipeline(DiffusionPipeline):
    '''simple docstring'''

    unet: UNet2DModel
    scheduler: ScoreSdeVeScheduler

    def __init__(self, unet: UNet2DModel, scheduler: ScoreSdeVeScheduler) -> None:
        """simple docstring"""
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(self, batch_size: int = 1, num_inference_steps: int = 2000, generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, output_type: Optional[str] = "pil", return_dict: bool = True, **kwargs) -> Union[ImagePipelineOutput, Tuple]:
        """simple docstring"""
        img_size = self.unet.config.sample_size
        shape = (batch_size, 3, img_size, img_size)

        model = self.unet

        sample = randn_tensor(shape, generator=generator) * self.scheduler.init_noise_sigma
        sample = sample.to(self.device)

        self.scheduler.set_timesteps(num_inference_steps)
        self.scheduler.set_sigmas(num_inference_steps)

        for i, t in enumerate(self.progress_bar(self.scheduler.timesteps)):
            sigma_t = self.scheduler.sigmas[i] * torch.ones(shape[0], device=self.device)

            # correction step
            for _ in range(self.scheduler.config.correct_steps):
                model_output = self.unet(sample, sigma_t).sample
                sample = self.scheduler.step_correct(model_output, sample, generator=generator).prev_sample

            # prediction step
            model_output = model(sample, sigma_t).sample
            output = self.scheduler.step_pred(model_output, t, sample, generator=generator)

            sample, sample_mean = output.prev_sample, output.prev_sample_mean

        sample = sample_mean.clamp(0, 1)
        sample = sample.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            sample = self.numpy_to_pil(sample)

        if not return_dict:
            return (sample,)

        return ImagePipelineOutput(images=sample)
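# Hypothetical usage sketch for the pipeline above (not from the source file): a tiny,
# randomly initialized UNet2DModel lets the predictor-corrector loop run on CPU. A real
# checkpoint would instead be loaded via `ScoreSdeVePipeline.from_pretrained`. Note the
# module itself uses relative imports, so this is only meant as an illustration.
#
#     unet = UNet2DModel(
#         sample_size=32,
#         in_channels=3,
#         out_channels=3,
#         block_out_channels=(32, 64),
#         down_block_types=("DownBlock2D", "AttnDownBlock2D"),
#         up_block_types=("AttnUpBlock2D", "UpBlock2D"),
#     )
#     scheduler = ScoreSdeVeScheduler()
#     pipe = ScoreSdeVePipeline(unet=unet, scheduler=scheduler)
#     image = pipe(batch_size=1, num_inference_steps=10, output_type="pil").images[0]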
'''simple docstring'''
import os
from typing import List, Optional, Union
from ...tokenization_utils import PreTrainedTokenizer
from ...tokenization_utils_base import AddedToken
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/esm2_t6_8M_UR50D": "https://huggingface.co/facebook/esm2_t6_8M_UR50D/resolve/main/vocab.txt",
        "facebook/esm2_t12_35M_UR50D": "https://huggingface.co/facebook/esm2_t12_35M_UR50D/resolve/main/vocab.txt",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/esm2_t6_8M_UR50D": 1024,
    "facebook/esm2_t12_35M_UR50D": 1024,
}
def load_vocab_file(vocab_file) -> List[str]:
    '''simple docstring'''
    with open(vocab_file, "r") as f:
        lines = f.read().splitlines()
    return [l.strip() for l in lines]
class EsmTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    def __init__(self, vocab_file, unk_token="<unk>", cls_token="<cls>", pad_token="<pad>", mask_token="<mask>", eos_token="<eos>", **kwargs):
        super().__init__(**kwargs)
        self.all_tokens = load_vocab_file(vocab_file)
        self._id_to_token = dict(enumerate(self.all_tokens))
        self._token_to_id = {tok: ind for ind, tok in enumerate(self.all_tokens)}
        self.unk_token = unk_token
        self.cls_token = cls_token
        self.pad_token = pad_token
        self.mask_token = mask_token
        self.eos_token = eos_token
        self.unique_no_split_tokens = self.all_tokens
        self._create_trie(self.unique_no_split_tokens)
    def _convert_id_to_token(self, index):
        return self._id_to_token.get(index, self.unk_token)

    def _convert_token_to_id(self, token):
        return self._token_to_id.get(token, self._token_to_id.get(self.unk_token))

    def _tokenize(self, text, **kwargs):
        return text.split()

    def get_vocab_size(self, with_added_tokens=False):
        return len(self._id_to_token)

    def get_vocab(self):
        return {token: i for i, token in enumerate(self.all_tokens)}

    def token_to_id(self, token):
        return self._token_to_id.get(token, self._token_to_id.get(self.unk_token))

    def id_to_token(self, index):
        return self._id_to_token.get(index, self.unk_token)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        cls = [self.cls_token_id]
        sep = [self.eos_token_id]  # No sep token in ESM vocabulary
        if token_ids_1 is None:
            if self.eos_token_id is None:
                return cls + token_ids_0
            else:
                return cls + token_ids_0 + sep
        elif self.eos_token_id is None:
            raise ValueError("Cannot tokenize multiple sequences when EOS token is not set!")
        return cls + token_ids_0 + sep + token_ids_1 + sep  # Multiple inputs always have an EOS token

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    "You should not supply a second sequence if the provided sequence of "
                    "ids is already formatted with special tokens for the model.")
            return [1 if token in self.all_special_ids else 0 for token in token_ids_0]
        mask = [1] + ([0] * len(token_ids_0)) + [1]
        if token_ids_1 is not None:
            mask += [0] * len(token_ids_1) + [1]
        return mask

    def save_vocabulary(self, save_directory, filename_prefix):
        vocab_file = os.path.join(save_directory, (filename_prefix + "-" if filename_prefix else "") + "vocab.txt")
        with open(vocab_file, "w") as f:
            f.write("\n".join(self.all_tokens))
        return (vocab_file,)

    @property
    def vocab_size(self):
        return self.get_vocab_size(with_added_tokens=False)

    def _add_tokens(self, new_tokens, special_tokens=False):
        return super()._add_tokens(new_tokens, special_tokens=special_tokens)
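# Quick hypothetical round-trip with the tokenizer above: write a toy vocabulary file,
# load it, and tokenize a whitespace-separated sequence. The vocabulary contents below
# are invented purely for illustration.
if __name__ == "__main__":
    import tempfile

    toy_vocab = ["<cls>", "<pad>", "<eos>", "<unk>", "A", "G", "V", "<mask>"]
    with tempfile.TemporaryDirectory() as tmp_dir:
        vocab_path = os.path.join(tmp_dir, "vocab.txt")
        with open(vocab_path, "w") as f:
            f.write("\n".join(toy_vocab))
        tokenizer = EsmTokenizer(vocab_path)
        print(tokenizer("A G V")["input_ids"])  # expected: [cls_id, 4, 5, 6, eos_id]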
"""simple docstring"""
import json
import os
from pathlib import Path
import pytest
from datasets.download.download_config import DownloadConfig
from datasets.download.download_manager import DownloadManager
from datasets.utils.file_utils import hash_url_to_filename
URL = "http://www.mocksite.com/file1.txt"
CONTENT = '"text": ["foo", "foo"]'
HASH = "6d8ce9aa78a471c7477201efbeabd3bb01ac2e7d100a6dc024ba1608361f90a8"
class MockResponse:
    status_code = 200
    headers = {"Content-Length": "100"}
    cookies = {}

    def iter_content(self, **kwargs):
        '''simple docstring'''
        return [bytes(CONTENT, "utf-8")]
def mock_request(*args, **kwargs):
    """simple docstring"""
    return MockResponse()
@pytest.mark.parametrize("urls_type", [str, list, dict])
def test_download_manager_download(urls_type, tmp_path, monkeypatch):
    """simple docstring"""
    import requests

    monkeypatch.setattr(requests, "request", mock_request)
    url = URL
    if issubclass(urls_type, str):
        urls = url
    elif issubclass(urls_type, list):
        urls = [url]
    elif issubclass(urls_type, dict):
        urls = {"train": url}
    dataset_name = "dummy"
    cache_subdir = "downloads"
    cache_dir_root = tmp_path
    download_config = DownloadConfig(
        cache_dir=os.path.join(cache_dir_root, cache_subdir), use_etag=True, )
    dl_manager = DownloadManager(dataset_name=dataset_name, download_config=download_config)
    downloaded_paths = dl_manager.download(urls)
    input_urls = urls
    for downloaded_paths in [downloaded_paths]:
        if isinstance(urls, str):
            downloaded_paths = [downloaded_paths]
            input_urls = [urls]
        elif isinstance(urls, dict):
            assert "train" in downloaded_paths.keys()
            downloaded_paths = downloaded_paths.values()
            input_urls = urls.values()
        assert downloaded_paths
        for downloaded_path, input_url in zip(downloaded_paths, input_urls):
            assert downloaded_path == dl_manager.downloaded_paths[input_url]
            downloaded_path = Path(downloaded_path)
            parts = downloaded_path.parts
            assert parts[-1] == HASH
            assert parts[-2] == cache_subdir
            assert downloaded_path.exists()
            content = downloaded_path.read_text()
            assert content == CONTENT
            metadata_downloaded_path = downloaded_path.with_suffix(".json")
            assert metadata_downloaded_path.exists()
            metadata_content = json.loads(metadata_downloaded_path.read_text())
            assert metadata_content == {"url": URL, "etag": None}
@pytest.mark.parametrize("paths_type", [str, list, dict])
def test_download_manager_extract(paths_type, xz_file, text_file):
    """simple docstring"""
    filename = str(xz_file)
    if issubclass(paths_type, str):
        paths = filename
    elif issubclass(paths_type, list):
        paths = [filename]
    elif issubclass(paths_type, dict):
        paths = {"train": filename}
    dataset_name = "dummy"
    cache_dir = xz_file.parent
    extracted_subdir = "extracted"
    download_config = DownloadConfig(
        cache_dir=cache_dir, use_etag=True, )
    dl_manager = DownloadManager(dataset_name=dataset_name, download_config=download_config)
    extracted_paths = dl_manager.extract(paths)
    input_paths = paths
    for extracted_paths in [extracted_paths]:
        if isinstance(paths, str):
            extracted_paths = [extracted_paths]
            input_paths = [paths]
        elif isinstance(paths, dict):
            assert "train" in extracted_paths.keys()
            extracted_paths = extracted_paths.values()
            input_paths = paths.values()
        assert extracted_paths
        for extracted_path, input_path in zip(extracted_paths, input_paths):
            assert extracted_path == dl_manager.extracted_paths[input_path]
            extracted_path = Path(extracted_path)
            parts = extracted_path.parts
            assert parts[-1] == hash_url_to_filename(input_path, etag=None)
            assert parts[-2] == extracted_subdir
            assert extracted_path.exists()
            extracted_file_content = extracted_path.read_text()
            expected_file_content = text_file.read_text()
            assert extracted_file_content == expected_file_content
def _test_jsonl(path, file):
    """simple docstring"""
    assert path.endswith(".jsonl")
    for num_items, line in enumerate(file, start=1):
        item = json.loads(line.decode("utf-8"))
        assert item.keys() == {"col_1", "col_2", "col_3"}
    assert num_items == 4
@pytest.mark.parametrize("archive_jsonl", ["tar_jsonl_path", "zip_jsonl_path"])
def test_iter_archive_path(archive_jsonl, request):
    """simple docstring"""
    archive_jsonl_path = request.getfixturevalue(archive_jsonl)
    dl_manager = DownloadManager()
    for num_jsonl, (path, file) in enumerate(dl_manager.iter_archive(archive_jsonl_path), start=1):
        _test_jsonl(path, file)
    assert num_jsonl == 2
@pytest.mark.parametrize("archive_nested_jsonl", ["tar_nested_jsonl_path", "zip_nested_jsonl_path"])
def test_iter_archive_file(archive_nested_jsonl, request):
    """simple docstring"""
    archive_nested_jsonl_path = request.getfixturevalue(archive_nested_jsonl)
    dl_manager = DownloadManager()
    for num_tar, (path, file) in enumerate(dl_manager.iter_archive(archive_nested_jsonl_path), start=1):
        for num_jsonl, (subpath, subfile) in enumerate(dl_manager.iter_archive(file), start=1):
            _test_jsonl(subpath, subfile)
    assert num_tar == 1
    assert num_jsonl == 2
def test_iter_files(data_dir_with_hidden_files):
    """simple docstring"""
    dl_manager = DownloadManager()
    for num_file, file in enumerate(dl_manager.iter_files(data_dir_with_hidden_files), start=1):
        assert os.path.basename(file) == ("test.txt" if num_file == 1 else "train.txt")
    assert num_file == 2
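# Hypothetical standalone demo of the `iter_archive` pattern the tests above exercise:
# build a tiny tar archive on disk and stream (path, file-object) pairs back out of it.
if __name__ == "__main__":
    import io
    import tarfile
    import tempfile

    with tempfile.TemporaryDirectory() as tmp_dir:
        archive_path = os.path.join(tmp_dir, "data.tar")
        payload = b'{"col_1": 1, "col_2": 2, "col_3": 3}\n'
        with tarfile.open(archive_path, "w") as tar:
            info = tarfile.TarInfo("train.jsonl")
            info.size = len(payload)
            tar.addfile(info, io.BytesIO(payload))

        dl_manager = DownloadManager()
        for path, file in dl_manager.iter_archive(archive_path):
            print(path, file.readline())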
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import (
AutoProcessor,
BertTokenizerFast,
BlipImageProcessor,
        GPT2Tokenizer,
InstructBlipProcessor,
PreTrainedTokenizerFast,
)
@require_vision
class UpperCAmelCase_ ( unittest.TestCase ):
'''simple docstring'''
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        image_processor = BlipImageProcessor()
        tokenizer = GPT2Tokenizer.from_pretrained("hf-internal-testing/tiny-random-GPT2Model")
        qformer_tokenizer = BertTokenizerFast.from_pretrained("hf-internal-testing/tiny-random-bert")

        processor = InstructBlipProcessor(image_processor, tokenizer, qformer_tokenizer)

        processor.save_pretrained(self.tmpdirname)
    def get_tokenizer(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).tokenizer

    def get_image_processor(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).image_processor

    def get_qformer_tokenizer(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).qformer_tokenizer

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)
    def prepare_image_inputs(self):
        """Create a list of PIL images for testing."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs
    def test_save_load_pretrained_additional_features(self):
        processor = InstructBlipProcessor(
            tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor(), qformer_tokenizer=self.get_qformer_tokenizer(), )
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = InstructBlipProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0)

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, PreTrainedTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, BlipImageProcessor)
        self.assertIsInstance(processor.qformer_tokenizer, PreTrainedTokenizerFast)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        qformer_tokenizer = self.get_qformer_tokenizer()

        processor = InstructBlipProcessor(
            tokenizer=tokenizer, image_processor=image_processor, qformer_tokenizer=qformer_tokenizer)

        image_input = self.prepare_image_inputs()

        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        qformer_tokenizer = self.get_qformer_tokenizer()

        processor = InstructBlipProcessor(
            tokenizer=tokenizer, image_processor=image_processor, qformer_tokenizer=qformer_tokenizer)

        input_str = "lower newer"

        encoded_processor = processor(text=input_str)

        encoded_tokens = tokenizer(input_str, return_token_type_ids=False)
        encoded_tokens_qformer = qformer_tokenizer(input_str, return_token_type_ids=False)

        for key in encoded_tokens.keys():
            self.assertListEqual(encoded_tokens[key], encoded_processor[key])

        for key in encoded_tokens_qformer.keys():
            self.assertListEqual(encoded_tokens_qformer[key], encoded_processor["qformer_" + key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        qformer_tokenizer = self.get_qformer_tokenizer()

        processor = InstructBlipProcessor(
            tokenizer=tokenizer, image_processor=image_processor, qformer_tokenizer=qformer_tokenizer)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(
            list(inputs.keys()), ["input_ids", "attention_mask", "qformer_input_ids", "qformer_attention_mask", "pixel_values"], )

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        qformer_tokenizer = self.get_qformer_tokenizer()

        processor = InstructBlipProcessor(
            tokenizer=tokenizer, image_processor=image_processor, qformer_tokenizer=qformer_tokenizer)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        qformer_tokenizer = self.get_qformer_tokenizer()

        processor = InstructBlipProcessor(
            tokenizer=tokenizer, image_processor=image_processor, qformer_tokenizer=qformer_tokenizer)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(
            list(inputs.keys()), ["input_ids", "attention_mask", "qformer_input_ids", "qformer_attention_mask", "pixel_values"], )
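# A compact, hypothetical version of the round-trip these tests perform: compose the
# processor from the same tiny Hub checkpoints, save it, and reload it via AutoProcessor.
if __name__ == "__main__":
    import tempfile

    image_processor = BlipImageProcessor()
    tokenizer = GPT2Tokenizer.from_pretrained("hf-internal-testing/tiny-random-GPT2Model")
    qformer_tokenizer = BertTokenizerFast.from_pretrained("hf-internal-testing/tiny-random-bert")
    processor = InstructBlipProcessor(image_processor, tokenizer, qformer_tokenizer)

    with tempfile.TemporaryDirectory() as tmp_dir:
        processor.save_pretrained(tmp_dir)
        reloaded = AutoProcessor.from_pretrained(tmp_dir)
    print(type(reloaded).__name__)  # InstructBlipProcessor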
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import center_crop, normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
logger = logging.get_logger(__name__)
class UpperCAmelCase_(BaseImageProcessor):
    '''simple docstring'''

    model_input_names = ["pixel_values"]
    def __init__(self, do_resize=True, size=None, resample=PIL.Image.BICUBIC, do_center_crop=True, crop_size=None, rescale_factor=1 / 255, do_rescale=True, do_normalize=True, image_mean=None, image_std=None, **kwargs) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 256, "width": 256}
        size = get_size_dict(size)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
    def resize(self, image: np.ndarray, size: Dict[str, int], resample=PIL.Image.BICUBIC, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The size dictionary must have keys 'height' and 'width'. Got {size.keys()}")
        return resize(
            image, size=(size["height"], size["width"]), resample=resample, data_format=data_format, **kwargs)
    def center_crop(self, image: np.ndarray, size: Dict[str, int], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The size dictionary must have keys 'height' and 'width'. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)
    def rescale(self, image: np.ndarray, scale: Union[int, float], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs):
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image: np.ndarray, mean, std, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)
    def preprocess(self, images: ImageInput, do_resize: bool = None, size: Dict[str, int] = None, resample=None, do_center_crop: bool = None, crop_size: Dict[str, int] = None, do_rescale: bool = None, rescale_factor: float = None, do_normalize: bool = None, image_mean=None, image_std=None, return_tensors: Optional[Union[str, TensorType]] = None, data_format: ChannelDimension = ChannelDimension.FIRST, **kwargs) -> PIL.Image.Image:
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        size = size if size is not None else self.size
        size = get_size_dict(size)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray.")

        if do_resize and (size is None or resample is None):
            raise ValueError("Size and resample must be specified if do_resize is True.")

        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
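# Hypothetical smoke test for the processor above: push one random HWC uint8 image through
# `preprocess` with the defaults and check the channels-first, center-cropped output layout.
# The module uses relative imports, so this is illustration only.
if __name__ == "__main__":
    image = np.random.randint(0, 256, size=(300, 400, 3), dtype=np.uint8)
    processor = UpperCAmelCase_()
    batch = processor.preprocess(image, return_tensors="np")
    print(batch["pixel_values"].shape)  # (1, 3, 224, 224)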
"""simple docstring"""
from __future__ import annotations
import bisect
def bisect_left(sorted_collection: list[int], item: int, lo: int = 0, hi: int = -1) -> int:
    if hi < 0:
        hi = len(sorted_collection)

    while lo < hi:
        mid = lo + (hi - lo) // 2
        if sorted_collection[mid] < item:
            lo = mid + 1
        else:
            hi = mid

    return lo


def bisect_right(sorted_collection: list[int], item: int, lo: int = 0, hi: int = -1) -> int:
    if hi < 0:
        hi = len(sorted_collection)

    while lo < hi:
        mid = lo + (hi - lo) // 2
        if sorted_collection[mid] <= item:
            lo = mid + 1
        else:
            hi = mid

    return lo


def insort_left(sorted_collection: list[int], item: int, lo: int = 0, hi: int = -1) -> None:
    sorted_collection.insert(bisect_left(sorted_collection, item, lo, hi), item)


def insort_right(sorted_collection: list[int], item: int, lo: int = 0, hi: int = -1) -> None:
    sorted_collection.insert(bisect_right(sorted_collection, item, lo, hi), item)


def binary_search(sorted_collection: list[int], item: int) -> int | None:
    left = 0
    right = len(sorted_collection) - 1

    while left <= right:
        midpoint = left + (right - left) // 2
        current_item = sorted_collection[midpoint]
        if current_item == item:
            return midpoint
        elif item < current_item:
            right = midpoint - 1
        else:
            left = midpoint + 1
    return None


def binary_search_std_lib(sorted_collection: list[int], item: int) -> int | None:
    index = bisect.bisect_left(sorted_collection, item)
    if index != len(sorted_collection) and sorted_collection[index] == item:
        return index
    return None


def binary_search_by_recursion(sorted_collection: list[int], item: int, left: int, right: int) -> int | None:
    if right < left:
        return None

    midpoint = left + (right - left) // 2

    if sorted_collection[midpoint] == item:
        return midpoint
    elif sorted_collection[midpoint] > item:
        return binary_search_by_recursion(sorted_collection, item, left, midpoint - 1)
    else:
        return binary_search_by_recursion(sorted_collection, item, midpoint + 1, right)
if __name__ == "__main__":
    user_input = input("Enter numbers separated by comma:\n").strip()
    collection = sorted(int(item) for item in user_input.split(","))
    target = int(input("Enter a single number to be found in the list:\n"))
    result = binary_search(collection, target)
if result is None:
print(f'{target} was not found in {collection}.')
else:
print(f'{target} was found at position {result} in {collection}.')
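# Worked example of the left/right boundary difference on a list with duplicates:
# bisect_left returns the first valid insertion index for the item, bisect_right the last.
def _boundary_demo() -> None:
    data = [1, 2, 4, 4, 4, 7]
    assert bisect_left(data, 4) == 2   # before the first 4
    assert bisect_right(data, 4) == 5  # after the last 4
    assert binary_search(data, 7) == 5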
"""simple docstring"""
import argparse
import os
import jax as jnp
import numpy as onp
import torch
import torch.nn as nn
from music_spectrogram_diffusion import inference
from t5x import checkpoints

from diffusers import DDPMScheduler, OnnxRuntimeModel, SpectrogramDiffusionPipeline
from diffusers.pipelines.spectrogram_diffusion import SpectrogramContEncoder, SpectrogramNotesEncoder, T5FilmDecoder
MODEL = "base_with_context"
def load_notes_encoder(weights, model):
    snake_case: Dict = nn.Parameter(torch.FloatTensor(weights["token_embedder"]["embedding"]))
    snake_case: Tuple = nn.Parameter(
        torch.FloatTensor(weights["Embed_0"]["embedding"]), requires_grad=False)
for lyr_num, lyr in enumerate(model.encoders ):
        ly_weight = weights[f"layers_{lyr_num}"]
snake_case : List[Any] = nn.Parameter(
torch.FloatTensor(ly_weight["pre_attention_layer_norm"]["scale"] ) )
        attention_weights = ly_weight["attention"]
snake_case : List[Any] = nn.Parameter(torch.FloatTensor(attention_weights["query"]["kernel"].T ) )
snake_case : List[Any] = nn.Parameter(torch.FloatTensor(attention_weights["key"]["kernel"].T ) )
snake_case : Any = nn.Parameter(torch.FloatTensor(attention_weights["value"]["kernel"].T ) )
snake_case : List[str] = nn.Parameter(torch.FloatTensor(attention_weights["out"]["kernel"].T ) )
snake_case : Tuple = nn.Parameter(torch.FloatTensor(ly_weight["pre_mlp_layer_norm"]["scale"] ) )
snake_case : Dict = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_0"]["kernel"].T ) )
snake_case : str = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_1"]["kernel"].T ) )
snake_case : Union[str, Any] = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wo"]["kernel"].T ) )
snake_case : List[Any] = nn.Parameter(torch.FloatTensor(weights["encoder_norm"]["scale"] ) )
return model
def load_continuous_encoder(weights, model):
    snake_case: Optional[Any] = nn.Parameter(torch.FloatTensor(weights["input_proj"]["kernel"].T))
    snake_case: Union[str, Any] = nn.Parameter(
        torch.FloatTensor(weights["Embed_0"]["embedding"]), requires_grad=False)
for lyr_num, lyr in enumerate(model.encoders ):
        ly_weight = weights[f"layers_{lyr_num}"]
        attention_weights = ly_weight["attention"]
snake_case : Union[str, Any] = nn.Parameter(torch.FloatTensor(attention_weights["query"]["kernel"].T ) )
snake_case : Optional[int] = nn.Parameter(torch.FloatTensor(attention_weights["key"]["kernel"].T ) )
snake_case : Dict = nn.Parameter(torch.FloatTensor(attention_weights["value"]["kernel"].T ) )
snake_case : int = nn.Parameter(torch.FloatTensor(attention_weights["out"]["kernel"].T ) )
snake_case : List[Any] = nn.Parameter(
torch.FloatTensor(ly_weight["pre_attention_layer_norm"]["scale"] ) )
snake_case : Optional[int] = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_0"]["kernel"].T ) )
snake_case : str = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_1"]["kernel"].T ) )
snake_case : Optional[Any] = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wo"]["kernel"].T ) )
snake_case : Optional[Any] = nn.Parameter(torch.FloatTensor(ly_weight["pre_mlp_layer_norm"]["scale"] ) )
snake_case : int = nn.Parameter(torch.FloatTensor(weights["encoder_norm"]["scale"] ) )
return model
def load_decoder(weights, model):
    snake_case: int = nn.Parameter(torch.FloatTensor(weights["time_emb_dense0"]["kernel"].T))
    snake_case: List[Any] = nn.Parameter(torch.FloatTensor(weights["time_emb_dense1"]["kernel"].T))
    snake_case: Tuple = nn.Parameter(
        torch.FloatTensor(weights["Embed_0"]["embedding"]), requires_grad=False)
    snake_case: Tuple = nn.Parameter(
        torch.FloatTensor(weights["continuous_inputs_projection"]["kernel"].T))
for lyr_num, lyr in enumerate(model.decoders ):
        ly_weight = weights[f"layers_{lyr_num}"]
snake_case : List[str] = nn.Parameter(
torch.FloatTensor(ly_weight["pre_self_attention_layer_norm"]["scale"] ) )
snake_case : Any = nn.Parameter(
torch.FloatTensor(ly_weight["FiLMLayer_0"]["DenseGeneral_0"]["kernel"].T ) )
        attention_weights = ly_weight["self_attention"]
snake_case : List[str] = nn.Parameter(torch.FloatTensor(attention_weights["query"]["kernel"].T ) )
snake_case : Any = nn.Parameter(torch.FloatTensor(attention_weights["key"]["kernel"].T ) )
snake_case : Optional[Any] = nn.Parameter(torch.FloatTensor(attention_weights["value"]["kernel"].T ) )
snake_case : str = nn.Parameter(torch.FloatTensor(attention_weights["out"]["kernel"].T ) )
        attention_weights = ly_weight["MultiHeadDotProductAttention_0"]
snake_case : Tuple = nn.Parameter(torch.FloatTensor(attention_weights["query"]["kernel"].T ) )
snake_case : Dict = nn.Parameter(torch.FloatTensor(attention_weights["key"]["kernel"].T ) )
snake_case : str = nn.Parameter(torch.FloatTensor(attention_weights["value"]["kernel"].T ) )
snake_case : Union[str, Any] = nn.Parameter(torch.FloatTensor(attention_weights["out"]["kernel"].T ) )
snake_case : Union[str, Any] = nn.Parameter(
torch.FloatTensor(ly_weight["pre_cross_attention_layer_norm"]["scale"] ) )
snake_case : Dict = nn.Parameter(torch.FloatTensor(ly_weight["pre_mlp_layer_norm"]["scale"] ) )
snake_case : Tuple = nn.Parameter(
torch.FloatTensor(ly_weight["FiLMLayer_1"]["DenseGeneral_0"]["kernel"].T ) )
snake_case : Tuple = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_0"]["kernel"].T ) )
snake_case : Any = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_1"]["kernel"].T ) )
snake_case : str = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wo"]["kernel"].T ) )
snake_case : List[str] = nn.Parameter(torch.FloatTensor(weights["decoder_norm"]["scale"] ) )
snake_case : List[Any] = nn.Parameter(torch.FloatTensor(weights["spec_out_dense"]["kernel"].T ) )
return model
def main(args):
    t5_checkpoint = checkpoints.load_t5x_checkpoint(args.checkpoint_path)
    t5_checkpoint = jnp.tree_util.tree_map(onp.array, t5_checkpoint)

    gin_overrides = [
        "from __gin__ import dynamic_registration",
        "from music_spectrogram_diffusion.models.diffusion import diffusion_utils",
        "diffusion_utils.ClassifierFreeGuidanceConfig.eval_condition_weight = 2.0",
        "diffusion_utils.DiffusionConfig.classifier_free_guidance = @diffusion_utils.ClassifierFreeGuidanceConfig()",
    ]

    gin_file = os.path.join(args.checkpoint_path, "..", "config.gin")
    gin_config = inference.parse_training_gin_file(gin_file, gin_overrides)
    synth_model = inference.InferenceModel(args.checkpoint_path, gin_config)

    scheduler = DDPMScheduler(beta_schedule="squaredcos_cap_v2", variance_type="fixed_large")

    notes_encoder = SpectrogramNotesEncoder(
        max_length=synth_model.sequence_length["inputs"], vocab_size=synth_model.model.module.config.vocab_size, d_model=synth_model.model.module.config.emb_dim, dropout_rate=synth_model.model.module.config.dropout_rate, num_layers=synth_model.model.module.config.num_encoder_layers, num_heads=synth_model.model.module.config.num_heads, d_kv=synth_model.model.module.config.head_dim, d_ff=synth_model.model.module.config.mlp_dim, feed_forward_proj="gated-gelu", )

    continuous_encoder = SpectrogramContEncoder(
        input_dims=synth_model.audio_codec.n_dims, targets_context_length=synth_model.sequence_length["targets_context"], d_model=synth_model.model.module.config.emb_dim, dropout_rate=synth_model.model.module.config.dropout_rate, num_layers=synth_model.model.module.config.num_encoder_layers, num_heads=synth_model.model.module.config.num_heads, d_kv=synth_model.model.module.config.head_dim, d_ff=synth_model.model.module.config.mlp_dim, feed_forward_proj="gated-gelu", )

    decoder = T5FilmDecoder(
        input_dims=synth_model.audio_codec.n_dims, targets_length=synth_model.sequence_length["targets_context"], max_decoder_noise_time=synth_model.model.module.config.max_decoder_noise_time, d_model=synth_model.model.module.config.emb_dim, num_layers=synth_model.model.module.config.num_decoder_layers, num_heads=synth_model.model.module.config.num_heads, d_kv=synth_model.model.module.config.head_dim, d_ff=synth_model.model.module.config.mlp_dim, dropout_rate=synth_model.model.module.config.dropout_rate, )

    notes_encoder = load_notes_encoder(t5_checkpoint["target"]["token_encoder"], notes_encoder)
    continuous_encoder = load_continuous_encoder(t5_checkpoint["target"]["continuous_encoder"], continuous_encoder)
    decoder = load_decoder(t5_checkpoint["target"]["decoder"], decoder)

    melgan = OnnxRuntimeModel.from_pretrained("kashif/soundstream_mel_decoder")

    pipe = SpectrogramDiffusionPipeline(
        notes_encoder=notes_encoder, continuous_encoder=continuous_encoder, decoder=decoder, scheduler=scheduler, melgan=melgan, )
    if args.save:
        pipe.save_pretrained(args.output_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("--output_path", default=None, type=str, required=True, help="Path to the converted model.")
parser.add_argument(
"--save", default=True, type=bool, required=False, help="Whether to save the converted model or not."
)
parser.add_argument(
"--checkpoint_path",
default=f'{MODEL}/checkpoint_500000',
type=str,
required=False,
help="Path to the original jax model checkpoint.",
)
    args = parser.parse_args()
main(args)
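# Why every Linear weight above is loaded with a ".T": Flax/T5X stores Dense kernels as
# (in_features, out_features), while torch.nn.Linear.weight has shape (out_features,
# in_features). A minimal sketch of that convention:
#
#     kernel = onp.ones((16, 32), dtype=onp.float32)               # Flax-style (in, out)
#     linear = nn.Linear(16, 32, bias=False)
#     linear.weight = nn.Parameter(torch.FloatTensor(kernel.T))    # torch wants (out, in)
#     assert linear.weight.shape == (32, 16)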
"""simple docstring"""
import itertools
import random
import unittest
import numpy as np
from transformers import BatchFeature, SpeechT5FeatureExtractor
from transformers.testing_utils import require_torch
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_torch_available():
import torch
global_rng = random.Random()
def floats_list(shape, scale=1.0, rng=None, name=None):
    """Create a random float32 tensor as a nested list."""
    if rng is None:
        rng = global_rng

    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)

    return values
@require_torch
class SpeechT5FeatureExtractionTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        min_seq_length=400,
        max_seq_length=2000,
        feature_size=1,
        padding_value=0.0,
        sampling_rate=16000,
        do_normalize=True,
        num_mel_bins=80,
        hop_length=16,
        win_length=64,
        win_function="hann_window",
        fmin=80,
        fmax=7600,
        mel_floor=1e-10,
        return_attention_mask=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.feature_size = feature_size
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.do_normalize = do_normalize
        self.num_mel_bins = num_mel_bins
        self.hop_length = hop_length
        self.win_length = win_length
        self.win_function = win_function
        self.fmin = fmin
        self.fmax = fmax
        self.mel_floor = mel_floor
        self.return_attention_mask = return_attention_mask
    def prepare_feat_extract_dict(self):
"""simple docstring"""
return {
"feature_size": self.feature_size,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"do_normalize": self.do_normalize,
"num_mel_bins": self.num_mel_bins,
"hop_length": self.hop_length,
"win_length": self.win_length,
"win_function": self.win_function,
"fmin": self.fmin,
"fmax": self.fmax,
"mel_floor": self.mel_floor,
"return_attention_mask": self.return_attention_mask,
}
    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))

        if equal_length:
            speech_inputs = floats_list((self.batch_size, self.max_seq_length))
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                _flatten(floats_list((x, self.feature_size)))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]

        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]

        return speech_inputs
    def prepare_inputs_for_target(self, equal_length=False, numpify=False):
        if equal_length:
            speech_inputs = [floats_list((self.max_seq_length, self.num_mel_bins)) for _ in range(self.batch_size)]
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                floats_list((x, self.num_mel_bins))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]

        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]

        return speech_inputs
@require_torch
class SpeechT5FeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
    feature_extraction_class = SpeechT5FeatureExtractor

    def setUp(self):
        self.feat_extract_tester = SpeechT5FeatureExtractionTester(self)
    def _check_zero_mean_unit_variance(self, input_vector):
        self.assertTrue(np.all(np.mean(input_vector, axis=0) < 1e-3))
        self.assertTrue(np.all(np.abs(np.var(input_vector, axis=0) - 1) < 1e-3))
    def test_call(self):
        # Tests that all call wrap to encode_plus and batch_encode_plus
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        # Test not batched input
        encoded_sequences_1 = feat_extract(speech_inputs[0], return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs[0], return_tensors="np").input_values
        self.assertTrue(np.allclose(encoded_sequences_1, encoded_sequences_2, atol=1e-3))

        # Test batched
        encoded_sequences_1 = feat_extract(speech_inputs, return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs, return_tensors="np").input_values
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

    def test_zero_mean_unit_variance_normalization_np(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]

        paddings = ["longest", "max_length", "do_not_pad"]
        max_lengths = [None, 1600, None]
        for max_length, padding in zip(max_lengths, paddings):
            processed = feat_extract(speech_inputs, padding=padding, max_length=max_length, return_tensors="np")
            input_values = processed.input_values

            self._check_zero_mean_unit_variance(input_values[0][:800])
            self.assertTrue(input_values[0][800:].sum() < 1e-6)
            self._check_zero_mean_unit_variance(input_values[1][:1000])
            self.assertTrue(input_values[0][1000:].sum() < 1e-6)
            self._check_zero_mean_unit_variance(input_values[2][:1200])

    def test_zero_mean_unit_variance_normalization(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        lengths = range(800, 1400, 200)
        speech_inputs = [floats_list((1, x))[0] for x in lengths]

        paddings = ["longest", "max_length", "do_not_pad"]
        max_lengths = [None, 1600, None]

        for max_length, padding in zip(max_lengths, paddings):
            processed = feat_extract(speech_inputs, max_length=max_length, padding=padding)
            input_values = processed.input_values

            self._check_zero_mean_unit_variance(input_values[0][:800])
            self._check_zero_mean_unit_variance(input_values[1][:1000])
            self._check_zero_mean_unit_variance(input_values[2][:1200])

    def test_zero_mean_unit_variance_normalization_trunc_np_max_length(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        processed = feat_extract(
            speech_inputs, truncation=True, max_length=1000, padding="max_length", return_tensors="np")
        input_values = processed.input_values

        self._check_zero_mean_unit_variance(input_values[0, :800])
        self._check_zero_mean_unit_variance(input_values[1])
        self._check_zero_mean_unit_variance(input_values[2])

    def test_zero_mean_unit_variance_normalization_trunc_np_longest(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        processed = feat_extract(
            speech_inputs, truncation=True, max_length=1000, padding="longest", return_tensors="np")
        input_values = processed.input_values

        self._check_zero_mean_unit_variance(input_values[0, :800])
        self._check_zero_mean_unit_variance(input_values[1, :1000])
        self._check_zero_mean_unit_variance(input_values[2])

        # make sure that if max_length < longest -> then pad to max_length
        self.assertTrue(input_values.shape == (3, 1000))

        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        processed = feat_extract(
            speech_inputs, truncation=True, max_length=2000, padding="longest", return_tensors="np")
        input_values = processed.input_values

        self._check_zero_mean_unit_variance(input_values[0, :800])
        self._check_zero_mean_unit_variance(input_values[1, :1000])
        self._check_zero_mean_unit_variance(input_values[2])

        # make sure that if max_length > longest -> then pad to longest
        self.assertTrue(input_values.shape == (3, 1200))
    def test_double_precision_pad(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        np_speech_inputs = np.random.rand(100).astype(np.float64)
        py_speech_inputs = np_speech_inputs.tolist()

        for inputs in [py_speech_inputs, np_speech_inputs]:
            np_processed = feature_extractor.pad([{"input_values": inputs}], return_tensors="np")
            self.assertTrue(np_processed.input_values.dtype == np.float32)
            pt_processed = feature_extractor.pad([{"input_values": inputs}], return_tensors="pt")
            self.assertTrue(pt_processed.input_values.dtype == torch.float32)

    def test_call_target(self):
        # Tests that all call wrap to encode_plus and batch_encode_plus
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        # Test feature size
        input_values = feature_extractor(audio_target=np_speech_inputs, padding=True, return_tensors="np").input_values
        self.assertTrue(input_values.ndim == 3)
        self.assertTrue(input_values.shape[-1] == feature_extractor.num_mel_bins)

        # Test not batched input
        encoded_sequences_1 = feature_extractor(speech_inputs[0], return_tensors="np").input_values
        encoded_sequences_2 = feature_extractor(np_speech_inputs[0], return_tensors="np").input_values
        self.assertTrue(np.allclose(encoded_sequences_1, encoded_sequences_2, atol=1e-3))

        # Test batched
        encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors="np").input_values
        encoded_sequences_2 = feature_extractor(np_speech_inputs, return_tensors="np").input_values
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x))[0] for x in (800, 800, 800)]
        np_speech_inputs = np.asarray(speech_inputs)
        encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors="np").input_values
        encoded_sequences_2 = feature_extractor(np_speech_inputs, return_tensors="np").input_values
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

    def test_batch_feature_target(self):
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_target()
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        input_name = feat_extract.model_input_names[0]

        processed_features = BatchFeature({input_name: speech_inputs})

        self.assertTrue(all(len(x) == len(y) for x, y in zip(speech_inputs, processed_features[input_name])))

        speech_inputs = self.feat_extract_tester.prepare_inputs_for_target(equal_length=True)
        processed_features = BatchFeature({input_name: speech_inputs}, tensor_type="np")

        batch_features_input = processed_features[input_name]

        if len(batch_features_input.shape) < 3:
            batch_features_input = batch_features_input[:, :, None]

        self.assertTrue(
            batch_features_input.shape
            == (self.feat_extract_tester.batch_size, len(speech_inputs[0]), self.feat_extract_tester.num_mel_bins))

    @require_torch
    def test_batch_feature_target_pt(self):
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_target(equal_length=True)
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        input_name = feat_extract.model_input_names[0]

        processed_features = BatchFeature({input_name: speech_inputs}, tensor_type="pt")

        batch_features_input = processed_features[input_name]

        if len(batch_features_input.shape) < 3:
            batch_features_input = batch_features_input[:, :, None]

        self.assertTrue(
            batch_features_input.shape
            == (self.feat_extract_tester.batch_size, len(speech_inputs[0]), self.feat_extract_tester.num_mel_bins))

    @require_torch
    def test_padding_accepts_tensors_target_pt(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_target()
        input_name = feat_extract.model_input_names[0]

        processed_features = BatchFeature({input_name: speech_inputs})

        feat_extract.feature_size = feat_extract.num_mel_bins  # hack!

        input_np = feat_extract.pad(processed_features, padding="longest", return_tensors="np")[input_name]
        input_pt = feat_extract.pad(processed_features, padding="longest", return_tensors="pt")[input_name]

        self.assertTrue(abs(input_np.astype(np.float32).sum() - input_pt.numpy().astype(np.float32).sum()) < 1e-2)

    def test_attention_mask_target(self):
        feat_dict = self.feat_extract_dict
        feat_dict["return_attention_mask"] = True
        feat_extract = self.feature_extraction_class(**feat_dict)
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_target()
        input_lengths = [len(x) for x in speech_inputs]
        input_name = feat_extract.model_input_names[0]

        processed = BatchFeature({input_name: speech_inputs})

        feat_extract.feature_size = feat_extract.num_mel_bins  # hack!

        processed = feat_extract.pad(processed, padding="longest", return_tensors="np")
        self.assertIn("attention_mask", processed)
        self.assertListEqual(list(processed.attention_mask.shape), list(processed[input_name].shape[:2]))
        self.assertListEqual(processed.attention_mask.sum(-1).tolist(), input_lengths)

    def test_attention_mask_with_truncation_target(self):
        feat_dict = self.feat_extract_dict
        feat_dict["return_attention_mask"] = True
        feat_extract = self.feature_extraction_class(**feat_dict)
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_target()
        input_lengths = [len(x) for x in speech_inputs]
        input_name = feat_extract.model_input_names[0]

        processed = BatchFeature({input_name: speech_inputs})
        max_length = min(input_lengths)

        feat_extract.feature_size = feat_extract.num_mel_bins  # hack!

        processed_pad = feat_extract.pad(
            processed, padding="max_length", max_length=max_length, truncation=True, return_tensors="np")
        self.assertIn("attention_mask", processed_pad)
        self.assertListEqual(
            list(processed_pad.attention_mask.shape), [processed_pad[input_name].shape[0], max_length])
        self.assertListEqual(
            processed_pad.attention_mask[:, :max_length].sum(-1).tolist(), [max_length for x in speech_inputs])
    def _load_datasamples(self, num_samples):
        from datasets import load_dataset

        ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        # automatic decoding with librispeech
        speech_samples = ds.sort("id").select(range(num_samples))[:num_samples]["audio"]

        return [x["array"] for x in speech_samples]

    def test_integration(self):
        # fmt: off
        EXPECTED_INPUT_VALUES = torch.tensor(
            [2.3804e-03, 2.0752e-03, 1.9836e-03, 2.1057e-03, 1.6174e-03,
             3.0518e-04, 9.1553e-05, 3.3569e-04, 9.7656e-04, 1.8311e-03,
             2.0142e-03, 2.1057e-03, 1.7395e-03, 4.5776e-04, -3.9673e-04,
             4.5776e-04, 1.0071e-03, 9.1553e-05, 4.8828e-04, 1.1597e-03,
             7.3242e-04, 9.4604e-04, 1.8005e-03, 1.8311e-03, 8.8501e-04,
             4.2725e-04, 4.8828e-04, 7.3242e-04, 1.0986e-03, 2.1057e-03]
        )
        # fmt: on

        input_speech = self._load_datasamples(1)
        feature_extractor = SpeechT5FeatureExtractor()
        input_values = feature_extractor(input_speech, return_tensors="pt").input_values
        self.assertEqual(input_values.shape, (1, 93680))
        self.assertTrue(torch.allclose(input_values[0, :30], EXPECTED_INPUT_VALUES, atol=1e-6))

    def test_integration_target(self):
        # fmt: off
        EXPECTED_INPUT_VALUES = torch.tensor(
            [-2.6870, -3.0104, -3.1356, -3.5352, -3.0044, -3.0353, -3.4719, -3.6777,
             -3.1520, -2.9435, -2.6553, -2.8795, -2.9944, -2.5921, -3.0279, -3.0386,
             -3.0864, -3.1291, -3.2353, -2.7444, -2.6831, -2.7287, -3.1761, -3.1571,
             -3.2726, -3.0582, -3.1007, -3.4533, -3.4695, -3.0998]
        )
        # fmt: on

        input_speech = self._load_datasamples(1)
        feature_extractor = SpeechT5FeatureExtractor()
        input_values = feature_extractor(audio_target=input_speech, return_tensors="pt").input_values
        self.assertEqual(input_values.shape, (1, 366, 80))
        self.assertTrue(torch.allclose(input_values[0, 0, :30], EXPECTED_INPUT_VALUES, atol=1e-4))
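# Standalone sketch of the property `_check_zero_mean_unit_variance` asserts above: after
# per-feature standardization (the kind of normalization `do_normalize` applies), every
# column has roughly zero mean and unit variance.
def _normalization_demo() -> None:
    x = np.random.rand(10_000, 80).astype(np.float32)
    x = (x - x.mean(axis=0)) / np.sqrt(x.var(axis=0) + 1e-7)
    assert np.all(np.abs(x.mean(axis=0)) < 1e-3)
    assert np.all(np.abs(x.var(axis=0) - 1) < 1e-3)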
import d4rl  # noqa
import gym
import tqdm
from diffusers.experimental import ValueGuidedRLPipeline
config = {
"n_samples": 64,
"horizon": 32,
"num_inference_steps": 20,
"n_guide_steps": 2, # can set to 0 for faster sampling, does not use value network
"scale_grad_by_std": True,
"scale": 0.1,
"eta": 0.0,
"t_grad_cutoff": 2,
"device": "cpu",
}
if __name__ == "__main__":
    env_name = "hopper-medium-v2"
    env = gym.make(env_name)

    pipeline = ValueGuidedRLPipeline.from_pretrained(
"bglick13/hopper-medium-v2-value-function-hor32",
env=env,
)
env.seed(0)
    obs = env.reset()
    total_reward = 0
    total_score = 0
    T = 1_000
    rollout = [obs.copy()]
try:
for t in tqdm.tqdm(range(T)):
# call the policy
            denorm_actions = pipeline(obs, planning_horizon=32)
# execute action in environment
            next_observation, reward, terminal, _ = env.step(denorm_actions)
            score = env.get_normalized_score(total_reward)
# update return
total_reward += reward
total_score += score
print(
F'''Step: {t}, Reward: {reward}, Total Reward: {total_reward}, Score: {score}, Total Score:'''
F''' {total_score}'''
)
# save observations for rendering
rollout.append(next_observation.copy())
            obs = next_observation
except KeyboardInterrupt:
pass
print(F'''Total reward: {total_reward}''')
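# The control loop above in schematic, dependency-free form: a receding-horizon agent
# replans from the latest observation on every step. The stub policy and environment
# below are invented so the pattern runs without d4rl/mujoco.
def _control_loop_demo(steps: int = 10) -> float:
    import numpy as np

    class _StubEnv:
        def reset(self):
            return np.zeros(3)

        def step(self, action):
            return np.random.randn(3), float(np.sum(action)), False, {}

    def _stub_policy(obs, planning_horizon=32):
        return np.full(2, 0.01)  # stand-in for the diffusion planner's action

    env = _StubEnv()
    obs = env.reset()
    total_reward = 0.0
    for _ in range(steps):
        action = _stub_policy(obs, planning_horizon=32)
        obs, reward, terminal, _info = env.step(action)
        total_reward += reward
    return total_reward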
import json
import multiprocessing as mp
import re
from collections import defaultdict
from functools import partial
from typing import Dict, List, Optional, Set, Tuple, Type
from datasets import Dataset
from datasketch import MinHash, MinHashLSH
from dpu_utils.utils.iterators import ThreadedIterator
from tqdm import tqdm
NON_ALPHA = re.compile("[^A-Za-z_0-9]")
# parameters used in DuplicationIndex
MIN_NUM_TOKENS = 10
NUM_PERM = 256
def UpperCamelCase ( _A ):
"""simple docstring"""
if len(_A ) < MIN_NUM_TOKENS:
return None
__magic_name__ : List[Any] = MinHash(num_perm=_A )
for token in set(_A ):
min_hash.update(token.encode() )
return min_hash
def UpperCamelCase ( _A ):
"""simple docstring"""
return {t for t in NON_ALPHA.split(_A ) if len(t.strip() ) > 0}
class snake_case__ :
def __init__( self , *,
lowerCAmelCase__ = 0.8_5 , ) -> Any:
__magic_name__ : Optional[int] = duplication_jaccard_threshold
__magic_name__ : Optional[int] = NUM_PERM
__magic_name__ : Any = MinHashLSH(threshold=self._duplication_jaccard_threshold , num_perm=self._num_perm )
__magic_name__ : Any = defaultdict(lowerCAmelCase__ )
def __magic_name__ ( self , lowerCAmelCase__ , lowerCAmelCase__ ) -> None:
__magic_name__ : List[str] = self._index.query(lowerCAmelCase__ )
if code_key in self._index.keys:
print(F'Duplicate key {code_key}' )
return
self._index.insert(lowerCAmelCase__ , lowerCAmelCase__ )
if len(lowerCAmelCase__ ) > 0:
for base_duplicate in close_duplicates:
if base_duplicate in self._duplicate_clusters:
self._duplicate_clusters[base_duplicate].add(lowerCAmelCase__ )
break
else:
self._duplicate_clusters[close_duplicates[0]].add(lowerCAmelCase__ )
def __magic_name__ ( self ) -> List[List[Dict]]:
__magic_name__ : str = []
for base, duplicates in self._duplicate_clusters.items():
__magic_name__ : Union[str, Any] = [base] + list(lowerCAmelCase__ )
# reformat the cluster to be a list of dict
__magic_name__ : Optional[Any] = [{"""base_index""": el[0], """repo_name""": el[1], """path""": el[2]} for el in cluster]
duplicate_clusters.append(lowerCAmelCase__ )
return duplicate_clusters
def __magic_name__ ( self , lowerCAmelCase__ ) -> None:
__magic_name__ : Any = self.get_duplicate_clusters()
with open(lowerCAmelCase__ , """w""" ) as f:
json.dump(lowerCAmelCase__ , lowerCAmelCase__ )
def UpperCamelCase ( _A ):
"""simple docstring"""
__magic_name__ ,__magic_name__ : Tuple = element
__magic_name__ : int = get_min_hash([t for t in NON_ALPHA.split(data["""content"""] ) if len(t.strip() ) > 0] )
if min_hash is not None:
return (index, data["repo_name"], data["path"]), min_hash
def UpperCamelCase ( _A ):
"""simple docstring"""
with mp.Pool() as pool:
for data in pool.imap_unordered(
_compute_min_hash, ThreadedIterator(_A, max_queue_size=10000 ), chunksize=100, ):
if data is not None:
yield data
def UpperCamelCase ( _A, _A ):
"""simple docstring"""
__magic_name__ : Any = DuplicationIndex(duplication_jaccard_threshold=_A )
for filename, min_hash in tqdm(ThreadedIterator(minhash_iter(enumerate(_A ) ), max_queue_size=100 ) ):
di.add(_A, _A )
# Returns a List[Cluster] where Cluster is List[str] with the filenames.
return di.get_duplicate_clusters()
def UpperCamelCase ( _A, _A ):
"""simple docstring"""
__magic_name__ : Tuple = get_tokens(_A )
__magic_name__ : str = get_tokens(_A )
return len(tokensa & tokensa ) / len(tokensa | tokensa )
__magic_name__: List[Any] = None
def UpperCamelCase ( _A, _A ):
"""simple docstring"""
__magic_name__ : List[str] = []
for elementa in cluster:
__magic_name__ : Tuple = _shared_dataset[elementa["""base_index"""]]["""content"""]
for elementa in extremes:
__magic_name__ : List[Any] = _shared_dataset[elementa["""base_index"""]]["""content"""]
if jaccard_similarity(_A, _A ) >= jaccard_threshold:
elementa["copies"] += 1
break
else:
__magic_name__ : Union[str, Any] = 1
extremes.append(_A )
return extremes
def UpperCamelCase ( _A, _A, _A ):
"""simple docstring"""
global _shared_dataset
__magic_name__ : str = dataset
__magic_name__ : Optional[int] = []
__magic_name__ : Tuple = partial(_find_cluster_extremes_shared, jaccard_threshold=_A )
with mp.Pool() as pool:
for extremes in tqdm(
pool.imap_unordered(
_A, _A, ), total=len(_A ), ):
extremes_list.append(_A )
return extremes_list
def UpperCamelCase ( _A, _A = 0.85 ):
"""simple docstring"""
__magic_name__ : List[Any] = make_duplicate_clusters(_A, _A )
__magic_name__ : List[str] = {x["""base_index"""] for cluster in duplicate_clusters for x in cluster}
__magic_name__ : Tuple = {}
__magic_name__ : Union[str, Any] = find_extremes(_A, _A, _A )
for extremes in extremes_clusters:
for element in extremes:
__magic_name__ : Tuple = element
__magic_name__ : Tuple = duplicate_indices - set(extreme_dict.keys() )
__magic_name__ : Tuple = dataset.filter(lambda _A, _A : idx not in remove_indices, with_indices=_A )
# update duplicate_clusters
for cluster in duplicate_clusters:
for element in cluster:
__magic_name__ : List[str] = element["""base_index"""] in extreme_dict
if element["is_extreme"]:
__magic_name__ : Dict = extreme_dict[element["""base_index"""]]["""copies"""]
print(f'Original dataset size: {len(_A )}' )
print(f'Number of duplicate clusters: {len(_A )}' )
print(f'Files in duplicate cluster: {len(_A )}' )
print(f'Unique files in duplicate cluster: {len(_A )}' )
print(f'Filtered dataset size: {len(_A )}' )
return ds_filter, duplicate_clusters
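# Illustrative usage sketch (not part of the original script; the toy rows below
# are hypothetical, chosen only to match the "content"/"repo_name"/"path" schema
# the functions above expect):
#
# from datasets import Dataset
#
# ds = Dataset.from_dict(
#     {
#         "content": ["some sufficiently long code snippet ..."] * 4,
#         "repo_name": ["org/repo_a", "org/repo_b", "org/repo_c", "org/repo_d"],
#         "path": ["a.py", "b.py", "c.py", "d.py"],
#     }
# )
# ds_dedup, clusters = deduplicate_dataset(ds, jaccard_threshold=0.85)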
| 342 |
import json
import os
import unittest

from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers

from ...test_tokenization_common import TokenizerTesterMixin


@require_tokenizers
class MgpstrTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = MgpstrTokenizer
    test_rust_tokenizer = False
    from_pretrained_kwargs = {}
    test_seq2seq = False

    def setUp(self):
        super().setUp()

        # fmt: off
        vocab = ["[GO]", "[s]", "0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z"]
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")

    def get_tokenizer(self, **kwargs):
        return MgpstrTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "tester"
        output_text = "tester"
        return input_text, output_text

    @unittest.skip("MGP-STR always lower cases letters.")
    def test_added_tokens_do_lower_case(self):
        pass

    def test_add_special_tokens(self):
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                special_token = "[SPECIAL_TOKEN]"

                tokenizer.add_special_tokens({"cls_token": special_token})
                encoded_special_token = tokenizer.encode([special_token], add_special_tokens=False)
                self.assertEqual(len(encoded_special_token), 1)

                decoded = tokenizer.decode(encoded_special_token, skip_special_tokens=True)
                self.assertTrue(special_token not in decoded)

    def test_internal_consistency(self):
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                input_text, output_text = self.get_input_output_texts(tokenizer)

                tokens = tokenizer.tokenize(input_text)
                ids = tokenizer.convert_tokens_to_ids(tokens)
                ids_2 = tokenizer.encode(input_text, add_special_tokens=False)
                self.assertListEqual(ids, ids_2)

                tokens_2 = tokenizer.convert_ids_to_tokens(ids)
                self.assertNotEqual(len(tokens_2), 0)
                text_2 = tokenizer.decode(ids)
                self.assertIsInstance(text_2, str)

                self.assertEqual(text_2.replace(" ", ""), output_text)

    @unittest.skip("MGP-STR tokenizer only handles one sequence.")
    def test_maximum_encoded_length_num_special_tokens(self):
        pass

    @unittest.skip("inputs cannot be pretokenized in MgpstrTokenizer")
    def test_pretokenized_inputs(self):
        pass
| 342 | 1 |
import os

import numpy
import onnx


def _is_equal_tensor_proto(a, b):
    name_a = a.name
    name_b = b.name

    a.name = ""
    b.name = ""

    res = a == b

    a.name = name_a
    b.name = name_b

    return res


def _node_replace_input_with(node_proto, name, new_name):
    for i, input_name in enumerate(node_proto.input):
        if input_name == name:
            node_proto.input.insert(i, new_name)
            node_proto.input.pop(i + 1)

    if node_proto.op_type == "If":
        _graph_replace_input_with(node_proto.attribute[0].g, name, new_name)
        _graph_replace_input_with(node_proto.attribute[1].g, name, new_name)
    if node_proto.op_type == "Loop":
        _graph_replace_input_with(node_proto.attribute[0].g, name, new_name)


def _graph_replace_input_with(graph_proto, name, new_name):
    for n in graph_proto.node:
        _node_replace_input_with(n, name, new_name)


def _remove_dup_initializers_from_model(model, model_without_ext, ind_to_replace):
    inits_with_data = list(model.graph.initializer)
    inits = list(model_without_ext.graph.initializer)
    for i, ref_i in ind_to_replace:
        assert inits_with_data[i].name == inits[i].name
        assert inits_with_data[ref_i].name == inits[ref_i].name
        assert i > ref_i

        name_i = inits[i].name
        name_ref = inits[ref_i].name

        model_without_ext.graph.initializer.remove(inits[i])

        # for n in model.graph.node:
        _graph_replace_input_with(model_without_ext.graph, name_i, name_ref)


def remove_dup_initializers(onnx_file_path):
    """
    Removes duplicate initializers from an ONNX model file and saves the
    optimized model next to the original one.
    """
    model_file_folder = os.path.dirname(onnx_file_path)
    model_file_name = os.path.basename(onnx_file_path)

    model = onnx.load(os.path.join(model_file_folder, model_file_name))

    inits = list(model.graph.initializer)

    dup_set = set()
    dup_map = {}
    ind_to_replace = []

    total_reduced_size = 0

    for i in range(len(inits)):
        if i in dup_set:
            continue

        for j in range(i + 1, len(inits)):
            if j in dup_set:
                continue
            if _is_equal_tensor_proto(inits[i], inits[j]):
                dup_set.add(i)
                dup_set.add(j)

                # TensorProto data types: 1 = FLOAT, 6 = INT32, 7 = INT64, 11 = DOUBLE
                dtype = inits[j].data_type
                mem_size = numpy.prod(inits[j].dims)
                if dtype == 1:
                    mem_size *= 4
                elif dtype == 6:
                    mem_size *= 4
                elif dtype == 7 or dtype == 11:
                    mem_size *= 8
                else:
                    print("unexpected data type: ", dtype)
                total_reduced_size += mem_size

                name_i = inits[i].name
                name_j = inits[j].name

                if name_i in dup_map:
                    dup_map[name_i].append(name_j)
                else:
                    dup_map[name_i] = [name_j]
                ind_to_replace.append((j, i))

    print("total reduced size: ", total_reduced_size / 1024 / 1024 / 1024, "GB")

    ind_to_replace = sorted(ind_to_replace)

    _remove_dup_initializers_from_model(model, model, ind_to_replace)

    new_model = "optimized_" + model_file_name
    new_model = os.path.join(model_file_folder, new_model)
    onnx.save(model, new_model)

    return new_model
| 364 |
import enum
import os
from hashlib import sha256
from typing import Optional

from .. import config
from .logging import get_logger


logger = get_logger(__name__)


class VerificationMode(enum.Enum):
    ALL_CHECKS = "all_checks"
    BASIC_CHECKS = "basic_checks"
    NO_CHECKS = "no_checks"


class ChecksumVerificationException(Exception):
    """Exceptions during checksums verifications of downloaded files."""


class UnexpectedDownloadedFile(ChecksumVerificationException):
    """Some downloaded files were not expected."""


class ExpectedMoreDownloadedFiles(ChecksumVerificationException):
    """Some files were supposed to be downloaded but were not."""


class NonMatchingChecksumError(ChecksumVerificationException):
    """The downloaded file checksum don't match the expected checksum."""


def verify_checksums(expected_checksums: Optional[dict], recorded_checksums: dict, verification_name=None) -> None:
    if expected_checksums is None:
        logger.info("Unable to verify checksums.")
        return
    if len(set(expected_checksums) - set(recorded_checksums)) > 0:
        raise ExpectedMoreDownloadedFiles(str(set(expected_checksums) - set(recorded_checksums)))
    if len(set(recorded_checksums) - set(expected_checksums)) > 0:
        raise UnexpectedDownloadedFile(str(set(recorded_checksums) - set(expected_checksums)))
    bad_urls = [url for url in expected_checksums if expected_checksums[url] != recorded_checksums[url]]
    for_verification_name = " for " + verification_name if verification_name is not None else ""
    if len(bad_urls) > 0:
        raise NonMatchingChecksumError(
            f"Checksums didn't match{for_verification_name}:\n"
            f"{bad_urls}\n"
            "Set `verification_mode='no_checks'` to skip checksums verification and ignore this error"
        )
    logger.info("All the checksums matched successfully" + for_verification_name)


class SplitsVerificationException(Exception):
    """Exceptions during splits verifications."""


class UnexpectedSplits(SplitsVerificationException):
    """The expected splits of the downloaded file is missing."""


class ExpectedMoreSplits(SplitsVerificationException):
    """Some recorded splits are missing."""


class NonMatchingSplitsSizesError(SplitsVerificationException):
    """The splits sizes don't match the expected splits sizes."""


def verify_splits(expected_splits: Optional[dict], recorded_splits: dict) -> None:
    if expected_splits is None:
        logger.info("Unable to verify splits sizes.")
        return
    if len(set(expected_splits) - set(recorded_splits)) > 0:
        raise ExpectedMoreSplits(str(set(expected_splits) - set(recorded_splits)))
    if len(set(recorded_splits) - set(expected_splits)) > 0:
        raise UnexpectedSplits(str(set(recorded_splits) - set(expected_splits)))
    bad_splits = [
        {"expected": expected_splits[name], "recorded": recorded_splits[name]}
        for name in expected_splits
        if expected_splits[name].num_examples != recorded_splits[name].num_examples
    ]
    if len(bad_splits) > 0:
        raise NonMatchingSplitsSizesError(str(bad_splits))
    logger.info("All the splits matched successfully.")


def get_size_checksum_dict(path: str, record_checksum: bool = True) -> dict:
    """Compute the file size and the sha256 checksum of a file."""
    if record_checksum:
        m = sha256()
        with open(path, "rb") as f:
            for chunk in iter(lambda: f.read(1 << 20), b""):
                m.update(chunk)
        checksum = m.hexdigest()
    else:
        checksum = None
    return {"num_bytes": os.path.getsize(path), "checksum": checksum}


def is_small_dataset(dataset_size: int) -> bool:
    if dataset_size and config.IN_MEMORY_MAX_SIZE:
        return dataset_size < config.IN_MEMORY_MAX_SIZE
    else:
        return False
| 34 | 0 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "bigcode/gpt_bigcode-santacoder": "https://huggingface.co/bigcode/gpt_bigcode-santacoder/resolve/main/config.json",
}


class GPTBigCodeConfig(PretrainedConfig):
    """simple docstring"""

    model_type = "gpt_bigcode"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "n_embd",
        "max_position_embeddings": "n_positions",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self, vocab_size=50_257, n_positions=1_024, n_embd=768, n_layer=12, n_head=12,
        n_inner=None, activation_function="gelu_pytorch_tanh", resid_pdrop=0.1, embd_pdrop=0.1,
        attn_pdrop=0.1, layer_norm_epsilon=1e-5, initializer_range=0.02, scale_attn_weights=True,
        use_cache=True, bos_token_id=50_256, eos_token_id=50_256, attention_softmax_in_fp32=True,
        scale_attention_softmax_in_fp32=True, multi_query=True, **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.scale_attn_weights = scale_attn_weights
        self.use_cache = use_cache
        self.attention_softmax_in_fp32 = attention_softmax_in_fp32
        self.scale_attention_softmax_in_fp32 = scale_attention_softmax_in_fp32
        self.multi_query = multi_query

        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
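# Minimal usage sketch (added for illustration):
#
# config = GPTBigCodeConfig(n_layer=2)
# print(config.hidden_size)  # routed to n_embd via attribute_map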
| 54 |
from __future__ import annotations


END = "#"


class Trie:
    def __init__(self) -> None:
        self._trie: dict = {}

    def insert_word(self, text: str) -> None:
        trie = self._trie
        for char in text:
            if char not in trie:
                trie[char] = {}
            trie = trie[char]
        trie[END] = True

    def find_word(self, prefix: str) -> tuple | list:
        trie = self._trie
        for char in prefix:
            if char in trie:
                trie = trie[char]
            else:
                return []
        return self._elements(trie)

    def _elements(self, d: dict) -> tuple:
        result = []
        for c, v in d.items():
            sub_result = [" "] if c == END else [(c + s) for s in self._elements(v)]
            result.extend(sub_result)
        return tuple(result)


trie = Trie()
words = ("depart", "detergent", "daring", "dog", "deer", "deal")
for word in words:
    trie.insert_word(word)


def autocomplete_using_trie(string: str) -> tuple:
    """simple docstring"""
    suffixes = trie.find_word(string)
    return tuple(string + word for word in suffixes)


def main() -> None:
    """simple docstring"""
    print(autocomplete_using_trie("de"))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
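# Expected behaviour (added for illustration): with the words inserted above,
# autocomplete_using_trie("de") returns the completions built from the matching
# subtree - "depart ", "detergent ", "deer " and "deal " - where the trailing
# space marks the end-of-word sentinel.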
| 302 | 0 |
'''simple docstring'''

import warnings

from ...utils import logging
from .image_processing_clip import CLIPImageProcessor


logger = logging.get_logger(__name__)


class CLIPFeatureExtractor(CLIPImageProcessor):
    """simple docstring"""

    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class CLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use CLIPImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
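# Illustrative note: instantiating CLIPFeatureExtractor emits the FutureWarning
# above and otherwise behaves exactly like CLIPImageProcessor.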
| 354 |
'''simple docstring'''
import gc
import importlib.metadata
import tempfile
import unittest
from packaging import version
from transformers import (
    AutoModel,
    AutoModelForCausalLM,
    AutoModelForSeq2SeqLM,
    AutoModelForSequenceClassification,
    AutoTokenizer,
    BitsAndBytesConfig,
    pipeline,
)
from transformers.testing_utils import (
is_torch_available,
require_accelerate,
require_bitsandbytes,
require_torch,
require_torch_gpu,
require_torch_multi_gpu,
slow,
)
def get_some_linear_layer(model):
    """simple docstring"""
    if model.config.model_type == "gpt2":
        return model.transformer.h[0].mlp.c_fc
    return model.transformer.h[0].mlp.dense_4h_to_h
if is_torch_available():
import torch
import torch.nn as nn
class LoRALayer(nn.Module):
    """Wraps a linear layer with a LoRA-like adapter - used only for testing purposes."""

    def __init__(self, module: nn.Module, rank: int):
        super().__init__()
        self.module = module
        self.adapter = nn.Sequential(
            nn.Linear(module.in_features, rank, bias=False),
            nn.Linear(rank, module.out_features, bias=False),
        )
        small_std = (2.0 / (5 * min(module.in_features, module.out_features))) ** 0.5
        nn.init.normal_(self.adapter[0].weight, std=small_std)
        nn.init.zeros_(self.adapter[1].weight)
        self.adapter.to(module.weight.device)

    def forward(self, input, *args, **kwargs):
        return self.module(input, *args, **kwargs) + self.adapter(input)
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class Base4bitTest(unittest.TestCase):
    """simple docstring"""

    model_name = "bigscience/bloom-1b7"

    # Constant values
    EXPECTED_RELATIVE_DIFFERENCE = 2.109659552692574

    input_text = "Hello my name is"
    EXPECTED_OUTPUTS = set()
    EXPECTED_OUTPUTS.add("Hello my name is John and I am a professional photographer. I")
    EXPECTED_OUTPUTS.add("Hello my name is John.\nI am a friend of your father.\n")
    EXPECTED_OUTPUTS.add("Hello my name is John Doe, I am a student at the University")
    MAX_NEW_TOKENS = 10

    def setUp(self):
        # Models and tokenizer
        self.tokenizer = AutoTokenizer.from_pretrained(self.model_name)


class Bnb4BitTest(Base4bitTest):
    def setUp(self):
        super().setUp()

        # Models and tokenizer
        self.model_fp16 = AutoModelForCausalLM.from_pretrained(
            self.model_name, torch_dtype=torch.float16, device_map="auto"
        )
        self.model_4bit = AutoModelForCausalLM.from_pretrained(self.model_name, load_in_4bit=True, device_map="auto")

    def tearDown(self):
        del self.model_fp16
        del self.model_4bit

        gc.collect()
        torch.cuda.empty_cache()
    def test_quantization_config_json_serialization(self):
        """simple docstring"""
        config = self.model_4bit.config

        self.assertTrue(hasattr(config, "quantization_config"))

        _ = config.to_dict()
        _ = config.to_diff_dict()
        _ = config.to_json_string()

    def test_memory_footprint(self):
        """simple docstring"""
        from bitsandbytes.nn import Params4bit

        mem_fp16 = self.model_fp16.get_memory_footprint()
        mem_4bit = self.model_4bit.get_memory_footprint()

        self.assertAlmostEqual(mem_fp16 / mem_4bit, self.EXPECTED_RELATIVE_DIFFERENCE)
        linear = get_some_linear_layer(self.model_4bit)
        self.assertTrue(linear.weight.__class__ == Params4bit)

    def test_linear_are_4bit(self):
        """simple docstring"""
        from transformers import T5PreTrainedModel

        self.model_fp16.get_memory_footprint()
        self.model_4bit.get_memory_footprint()

        for name, module in self.model_4bit.named_modules():
            if isinstance(module, torch.nn.Linear):
                if name not in ["lm_head"] + T5PreTrainedModel._keep_in_fp32_modules:
                    # 4-bit parameters are packed in uint8 variables
                    self.assertTrue(module.weight.dtype == torch.uint8)

    def test_generate_quality(self):
        """simple docstring"""
        encoded_input = self.tokenizer(self.input_text, return_tensors="pt")
        output_sequences = self.model_4bit.generate(input_ids=encoded_input["input_ids"].to(0), max_new_tokens=10)

        self.assertIn(self.tokenizer.decode(output_sequences[0], skip_special_tokens=True), self.EXPECTED_OUTPUTS)

    def test_generate_quality_config(self):
        """simple docstring"""
        quantization_config = BitsAndBytesConfig()
        quantization_config.load_in_4bit = True

        model_4bit_from_config = AutoModelForCausalLM.from_pretrained(
            self.model_name, quantization_config=quantization_config, device_map="auto"
        )

        encoded_input = self.tokenizer(self.input_text, return_tensors="pt")
        output_sequences = model_4bit_from_config.generate(
            input_ids=encoded_input["input_ids"].to(0), max_new_tokens=10
        )

        self.assertIn(self.tokenizer.decode(output_sequences[0], skip_special_tokens=True), self.EXPECTED_OUTPUTS)

    def test_raise_on_save_pretrained(self):
        """simple docstring"""
        with self.assertRaises(NotImplementedError), tempfile.TemporaryDirectory() as tmpdirname:
            self.model_4bit.save_pretrained(tmpdirname)

    def test_raise_if_config_and_load_in_4bit(self):
        """simple docstring"""
        quantization_config = BitsAndBytesConfig()
        with self.assertRaises(ValueError):
            _ = AutoModelForCausalLM.from_pretrained(
                self.model_name,
                quantization_config=quantization_config,
                load_in_4bit=True,
                device_map="auto",
                bnb_4bit_quant_type="nf4",
            )

    def test_device_and_dtype_assignment(self):
        """simple docstring"""
        with self.assertRaises(ValueError):
            # Tries with `str`
            self.model_4bit.to("cpu")

        with self.assertRaises(ValueError):
            # Tries with a `dtype`
            self.model_4bit.to(torch.float16)

        with self.assertRaises(ValueError):
            # Tries with a `device`
            self.model_4bit.to(torch.device("cuda:0"))

        with self.assertRaises(ValueError):
            # Tries with a `device`
            self.model_4bit.float()

        with self.assertRaises(ValueError):
            # Tries with a `device`
            self.model_4bit.half()

        # Test if we did not break anything
        encoded_input = self.tokenizer(self.input_text, return_tensors="pt")

        self.model_fp16 = self.model_fp16.to(torch.float32)
        _ = self.model_fp16.generate(input_ids=encoded_input["input_ids"].to(0), max_new_tokens=10)

        # Check this does not throw an error
        _ = self.model_fp16.to("cpu")

        # Check this does not throw an error
        _ = self.model_fp16.half()

        # Check this does not throw an error
        _ = self.model_fp16.float()

    def test_fp32_4bit_conversion(self):
        """simple docstring"""
        model = AutoModelForSeq2SeqLM.from_pretrained("t5-small", load_in_4bit=True, device_map="auto")
        self.assertTrue(model.decoder.block[0].layer[2].DenseReluDense.wo.weight.dtype == torch.float32)
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class Bnb4BitT5Test(unittest.TestCase):
    """simple docstring"""

    @classmethod
    def setUpClass(cls):
        cls.model_name = "t5-small"
        cls.dense_act_model_name = "google/flan-t5-small"  # flan-t5 uses dense-act instead of dense-relu-dense
        cls.tokenizer = AutoTokenizer.from_pretrained(cls.model_name)
        cls.input_text = "Translate in German: Hello, my dog is cute"

    def tearDown(self):
        gc.collect()
        torch.cuda.empty_cache()

    def test_inference_without_keep_in_fp32(self):
        from transformers import T5ForConditionalGeneration

        modules = T5ForConditionalGeneration._keep_in_fp32_modules
        T5ForConditionalGeneration._keep_in_fp32_modules = None

        # test with `t5-small`
        model = T5ForConditionalGeneration.from_pretrained(self.model_name, load_in_4bit=True, device_map="auto")
        encoded_input = self.tokenizer(self.input_text, return_tensors="pt").to(0)
        _ = model.generate(**encoded_input)

        # test with `flan-t5-small`
        model = T5ForConditionalGeneration.from_pretrained(
            self.dense_act_model_name, load_in_4bit=True, device_map="auto"
        )
        encoded_input = self.tokenizer(self.input_text, return_tensors="pt").to(0)
        _ = model.generate(**encoded_input)
        T5ForConditionalGeneration._keep_in_fp32_modules = modules

    def test_inference_with_keep_in_fp32(self):
        import bitsandbytes as bnb

        from transformers import T5ForConditionalGeneration

        # test with `t5-small`
        model = T5ForConditionalGeneration.from_pretrained(self.model_name, load_in_4bit=True, device_map="auto")

        # there was a bug with decoders - this test checks that it is fixed
        self.assertTrue(isinstance(model.decoder.block[0].layer[0].SelfAttention.q, bnb.nn.Linear4bit))

        encoded_input = self.tokenizer(self.input_text, return_tensors="pt").to(0)
        _ = model.generate(**encoded_input)

        # test with `flan-t5-small`
        model = T5ForConditionalGeneration.from_pretrained(
            self.dense_act_model_name, load_in_4bit=True, device_map="auto"
        )
        encoded_input = self.tokenizer(self.input_text, return_tensors="pt").to(0)
        _ = model.generate(**encoded_input)
class Classes4BitModelTest(Base4bitTest):
    """simple docstring"""

    def setUp(self):
        super().setUp()

        # model_name
        self.model_name = "bigscience/bloom-560m"
        self.seq_to_seq_name = "t5-small"

        # Different types of model
        self.base_model = AutoModel.from_pretrained(self.model_name, load_in_4bit=True, device_map="auto")
        # Sequence classification model
        self.sequence_model = AutoModelForSequenceClassification.from_pretrained(
            self.model_name, load_in_4bit=True, device_map="auto"
        )
        # CausalLM model
        self.model_4bit = AutoModelForCausalLM.from_pretrained(self.model_name, load_in_4bit=True, device_map="auto")
        # Seq2seq model
        self.seq_to_seq_model = AutoModelForSeq2SeqLM.from_pretrained(
            self.seq_to_seq_name, load_in_4bit=True, device_map="auto"
        )

    def tearDown(self):
        del self.base_model
        del self.sequence_model
        del self.model_4bit
        del self.seq_to_seq_model

        gc.collect()
        torch.cuda.empty_cache()

    def test_correct_head_class(self):
        from bitsandbytes.nn import Params4bit

        self.assertTrue(self.base_model.h[-1].mlp.dense_4h_to_h.weight.__class__ == Params4bit)

        # Other heads should be nn.Parameter
        self.assertTrue(self.model_4bit.lm_head.weight.__class__ == torch.nn.Parameter)
        self.assertTrue(self.sequence_model.score.weight.__class__ == torch.nn.Parameter)
        self.assertTrue(self.seq_to_seq_model.lm_head.weight.__class__ == torch.nn.Parameter)
class Pipeline4BitTest(Base4bitTest):
    """simple docstring"""

    def setUp(self):
        super().setUp()

    def tearDown(self):
        del self.pipe

        gc.collect()
        torch.cuda.empty_cache()

    def test_pipeline(self):
        self.pipe = pipeline(
            "text-generation",
            model=self.model_name,
            model_kwargs={"device_map": "auto", "load_in_4bit": True, "torch_dtype": torch.float16},
            max_new_tokens=self.MAX_NEW_TOKENS,
        )

        # Real second forward pass
        pipeline_output = self.pipe(self.input_text)
        self.assertIn(pipeline_output[0]["generated_text"], self.EXPECTED_OUTPUTS)


@require_torch_multi_gpu
class Bnb4BitTestMultiGpu(Base4bitTest):
    def setUp(self):
        super().setUp()

    def test_multi_gpu_loading(self):
        model_parallel = AutoModelForCausalLM.from_pretrained(
            self.model_name, load_in_4bit=True, device_map="balanced"
        )

        # Check correct device map
        self.assertEqual(set(model_parallel.hf_device_map.values()), {0, 1})

        # Check that inference pass works on the model
        encoded_input = self.tokenizer(self.input_text, return_tensors="pt")

        # Second real batch
        output_parallel = model_parallel.generate(input_ids=encoded_input["input_ids"].to(0), max_new_tokens=10)
        self.assertIn(self.tokenizer.decode(output_parallel[0], skip_special_tokens=True), self.EXPECTED_OUTPUTS)
class Bnb4BitTestTraining(Base4bitTest):
    """simple docstring"""

    def setUp(self):
        self.model_name = "facebook/opt-350m"
        super().setUp()

    def test_training(self):
        if version.parse(importlib.metadata.version("bitsandbytes")) < version.parse("0.37.0"):
            return

        # Step 1: freeze all parameters
        model = AutoModelForCausalLM.from_pretrained(self.model_name, load_in_4bit=True)

        self.assertEqual(set(model.hf_device_map.values()), {torch.cuda.current_device()})

        for param in model.parameters():
            param.requires_grad = False  # freeze the model - train adapters later
            if param.ndim == 1:
                # cast the small parameters (e.g. layernorm) to fp32 for stability
                param.data = param.data.to(torch.float32)

        # Step 2: add adapters
        for _, module in model.named_modules():
            if "OPTAttention" in repr(type(module)):
                module.q_proj = LoRALayer(module.q_proj, rank=16)
                module.k_proj = LoRALayer(module.k_proj, rank=16)
                module.v_proj = LoRALayer(module.v_proj, rank=16)

        # Step 3: dummy batch
        batch = self.tokenizer("Test batch ", return_tensors="pt").to(0)

        # Step 4: Check if the gradient is not None
        with torch.cuda.amp.autocast():
            out = model.forward(**batch)
            out.logits.norm().backward()

        for module in model.modules():
            if isinstance(module, LoRALayer):
                self.assertTrue(module.adapter[1].weight.grad is not None)
                self.assertTrue(module.adapter[1].weight.grad.norm().item() > 0)
            elif isinstance(module, nn.Embedding):
                self.assertTrue(module.weight.grad is None)


class Bnb4BitGPT2Test(Bnb4BitTest):
    model_name = "gpt2-xl"
    EXPECTED_RELATIVE_DIFFERENCE = 3.3191854854152187
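# Minimal 4-bit loading sketch (added for illustration; mirrors what the tests
# above exercise, assuming a CUDA device and bitsandbytes are available):
#
# model = AutoModelForCausalLM.from_pretrained(
#     "facebook/opt-350m", load_in_4bit=True, device_map="auto"
# )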
| 164 | 0 |
import unittest
from transformers import DebertaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DebertaForMaskedLM,
DebertaForQuestionAnswering,
DebertaForSequenceClassification,
DebertaForTokenClassification,
DebertaModel,
)
from transformers.models.deberta.modeling_deberta import DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST
class DebertaModelTester(object):
    def __init__(
        self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True,
        use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=5,
        num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16,
        type_sequence_label_size=2, initializer_range=0.02, relative_attention=False,
        position_biased_input=True, pos_att_type="None", num_labels=3, num_choices=4, scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.relative_attention = relative_attention
        self.position_biased_input = position_biased_input
        self.pos_att_type = pos_att_type
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return DebertaConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range, relative_attention=self.relative_attention, position_biased_input=self.position_biased_input, pos_att_type=self.pos_att_type,
        )

    def get_pipeline_config(self):
        config = self.get_config()
        config.vocab_size = 300
        return config

    def check_loss_output(self, result):
        self.parent.assertListEqual(list(result.loss.size()), [])
    def create_and_check_deberta_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DebertaModel(config=config)
        model.to(torch_device)
        model.eval()
        sequence_output = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)[0]
        sequence_output = model(input_ids, token_type_ids=token_type_ids)[0]
        sequence_output = model(input_ids)[0]

        self.parent.assertListEqual(list(sequence_output.size()), [self.batch_size, self.seq_length, self.hidden_size])

    def create_and_check_deberta_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DebertaForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_deberta_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = DebertaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertListEqual(list(result.logits.size()), [self.batch_size, self.num_labels])
        self.check_loss_output(result)

    def create_and_check_deberta_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = DebertaForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_deberta_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DebertaForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class DebertaModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            DebertaModel,
            DebertaForMaskedLM,
            DebertaForSequenceClassification,
            DebertaForTokenClassification,
            DebertaForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": DebertaModel,
            "fill-mask": DebertaForMaskedLM,
            "question-answering": DebertaForQuestionAnswering,
            "text-classification": DebertaForSequenceClassification,
            "token-classification": DebertaForTokenClassification,
            "zero-shot": DebertaForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    fx_compatible = True
    test_torchscript = False
    test_pruning = False
    test_head_masking = False
    is_encoder_decoder = False

    def setUp(self):
        self.model_tester = DebertaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DebertaConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_deberta_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_model(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_sequence_classification(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_masked_lm(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_question_answering(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = DebertaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
@require_sentencepiece
@require_tokenizers
class DebertaModelIntegrationTest(unittest.TestCase):
    @unittest.skip(reason="Model not available yet")
    def test_inference_masked_lm(self):
        pass

    @slow
    def test_inference_no_head(self):
        model = DebertaModel.from_pretrained("microsoft/deberta-base")

        input_ids = torch.tensor([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]])
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        # compare the actual values for a slice.
        expected_slice = torch.tensor(
            [[[-0.5986, -0.8055, -0.8462], [1.4484, -0.9348, -0.8059], [0.3123, 0.0032, -1.4131]]]
        )

        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4), f"{output[:, 1:4, 1:4]}")
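    # Note (added for clarity): the expected slice above pins down a few hidden
    # states of the base checkpoint; the 1e-4 tolerance absorbs minor numerical
    # drift across hardware.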
| 36 |
def gray_code(bit_count: int) -> list:
    """Return the n-bit Gray code sequence as a list of integers."""
    if bit_count < 0:
        raise ValueError("The given input must be positive")

    # get the generated string sequence
    sequence = gray_code_sequence_string(bit_count)
    #
    # convert them to integers
    for i in range(len(sequence)):
        sequence[i] = int(sequence[i], 2)

    return sequence


def gray_code_sequence_string(bit_count: int) -> list:
    """Return the n-bit Gray code sequence as a list of binary strings."""
    if bit_count == 0:
        return ["0"]

    if bit_count == 1:
        return ["0", "1"]

    seq_len = 1 << bit_count  # defines the length of the sequence
    # 1 << n is equivalent to 2^n

    # recursive answer will generate answer for n-1 bits
    smaller_sequence = gray_code_sequence_string(bit_count - 1)

    sequence = []

    # append 0 to first half of the smaller sequence generated
    for i in range(seq_len // 2):
        generated_no = "0" + smaller_sequence[i]
        sequence.append(generated_no)

    # append 1 to second half ... start from the end of the list
    for i in reversed(range(seq_len // 2)):
        generated_no = "1" + smaller_sequence[i]
        sequence.append(generated_no)

    return sequence


if __name__ == "__main__":
    import doctest

    doctest.testmod()
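# Worked example (added for illustration): for bit_count = 2 the string sequence
# is ["00", "01", "11", "10"], so gray_code(2) returns [0, 1, 3, 2].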
| 36 | 1 |
import torch

from diffusers import DDPMScheduler

from .test_schedulers import SchedulerCommonTest


class DDPMSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (DDPMScheduler,)

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 10_00,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "variance_type": "fixed_small",
            "clip_sample": True,
        }

        config.update(**kwargs)
        return config

    def test_timesteps(self):
        for timesteps in [1, 5, 1_00, 10_00]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_betas(self):
        for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1], [0.002, 0.02, 0.2, 2]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_variance_type(self):
        for variance in ["fixed_small", "fixed_large", "other"]:
            self.check_over_configs(variance_type=variance)

    def test_clip_sample(self):
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample)

    def test_thresholding(self):
        self.check_over_configs(thresholding=False)
        for threshold in [0.5, 1.0, 2.0]:
            for prediction_type in ["epsilon", "sample", "v_prediction"]:
                self.check_over_configs(
                    thresholding=True, prediction_type=prediction_type, sample_max_value=threshold,
                )

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "sample", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_time_indices(self):
        for t in [0, 5_00, 9_99]:
            self.check_over_forward(time_step=t)

    def test_variance(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        assert torch.sum(torch.abs(scheduler._get_variance(0) - 0.0)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(4_87) - 0.00979)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(9_99) - 0.02)) < 1e-5

    def test_full_loop_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        num_trained_timesteps = len(scheduler)

        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)

        for t in reversed(range(num_trained_timesteps)):
            # 1. predict noise residual
            residual = model(sample, t)

            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample

            # if t > 0:
            #     noise = self.dummy_sample_deter
            #     variance = scheduler.get_variance(t) ** (0.5) * noise
            #
            # sample = pred_prev_sample + variance
            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 258.9606) < 1e-2
        assert abs(result_mean.item() - 0.3372) < 1e-3

    def test_full_loop_with_v_prediction(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type="v_prediction")
        scheduler = scheduler_class(**scheduler_config)

        num_trained_timesteps = len(scheduler)

        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)

        for t in reversed(range(num_trained_timesteps)):
            # 1. predict noise residual
            residual = model(sample, t)

            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample

            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 202.0296) < 1e-2
        assert abs(result_mean.item() - 0.2631) < 1e-3

    def test_custom_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [1_00, 87, 50, 1, 0]

        scheduler.set_timesteps(timesteps=timesteps)

        scheduler_timesteps = scheduler.timesteps

        for i, timestep in enumerate(scheduler_timesteps):
            if i == len(scheduler_timesteps) - 1:
                expected_prev_t = -1
            else:
                expected_prev_t = timesteps[i + 1]

            prev_t = scheduler.previous_timestep(timestep)
            prev_t = prev_t.item()

            self.assertEqual(prev_t, expected_prev_t)

    def test_custom_timesteps_increasing_order(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [1_00, 87, 50, 51, 0]

        with self.assertRaises(ValueError, msg="`custom_timesteps` must be in descending order."):
            scheduler.set_timesteps(timesteps=timesteps)

    def test_custom_timesteps_passing_both_num_inference_steps_and_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [1_00, 87, 50, 1, 0]
        num_inference_steps = len(timesteps)

        with self.assertRaises(ValueError, msg="Can only pass one of `num_inference_steps` or `custom_timesteps`."):
            scheduler.set_timesteps(num_inference_steps=num_inference_steps, timesteps=timesteps)

    def test_custom_timesteps_too_large(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [scheduler.config.num_train_timesteps]

        with self.assertRaises(
            ValueError,
            msg="`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}}",
        ):
            scheduler.set_timesteps(timesteps=timesteps)
| 256 |
import argparse
import csv
import logging
import os
import random
import numpy as np
import torch
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler, TensorDataset
from tqdm import tqdm, trange
from transformers import (
CONFIG_NAME,
WEIGHTS_NAME,
AdamW,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTTokenizer,
get_linear_schedule_with_warmup,
)
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""", datefmt="""%m/%d/%Y %H:%M:%S""", level=logging.INFO
)
logger = logging.getLogger(__name__)


def accuracy(out, labels):
    outputs = np.argmax(out, axis=1)
    return np.sum(outputs == labels)


def load_rocstories_dataset(dataset_path):
    """Output a list of tuples(story, 1st continuation, 2nd continuation, label)"""
    with open(dataset_path, encoding="utf_8") as f:
        f = csv.reader(f)
        output = []
        next(f)  # skip the first line
        for line in tqdm(f):
            output.append((" ".join(line[1:5]), line[5], line[6], int(line[-1]) - 1))
    return output


def pre_process_datasets(encoded_datasets, input_len, cap_length, start_token, delimiter_token, clf_token):
    """Pre-process datasets containing lists of tuples(story, 1st continuation, 2nd continuation, label)"""
    tensor_datasets = []
    for dataset in encoded_datasets:
        n_batch = len(dataset)
        input_ids = np.zeros((n_batch, 2, input_len), dtype=np.int64)
        mc_token_ids = np.zeros((n_batch, 2), dtype=np.int64)
        lm_labels = np.full((n_batch, 2, input_len), fill_value=-100, dtype=np.int64)
        mc_labels = np.zeros((n_batch,), dtype=np.int64)
        for (
            i,
            (story, cont1, cont2, mc_label),
        ) in enumerate(dataset):
            with_cont1 = [start_token] + story[:cap_length] + [delimiter_token] + cont1[:cap_length] + [clf_token]
            with_cont2 = [start_token] + story[:cap_length] + [delimiter_token] + cont2[:cap_length] + [clf_token]
            input_ids[i, 0, : len(with_cont1)] = with_cont1
            input_ids[i, 1, : len(with_cont2)] = with_cont2
            mc_token_ids[i, 0] = len(with_cont1) - 1
            mc_token_ids[i, 1] = len(with_cont2) - 1
            lm_labels[i, 0, : len(with_cont1)] = with_cont1
            lm_labels[i, 1, : len(with_cont2)] = with_cont2
            mc_labels[i] = mc_label
        all_inputs = (input_ids, mc_token_ids, lm_labels, mc_labels)
        tensor_datasets.append(tuple(torch.tensor(t) for t in all_inputs))
    return tensor_datasets
def UpperCAmelCase ( ):
A : Any = argparse.ArgumentParser()
parser.add_argument("--model_name" , type=_lowerCamelCase , default="openai-gpt" , help="pretrained model name" )
parser.add_argument("--do_train" , action="store_true" , help="Whether to run training." )
parser.add_argument("--do_eval" , action="store_true" , help="Whether to run eval on the dev set." )
parser.add_argument(
"--output_dir" , default=_lowerCamelCase , type=_lowerCamelCase , required=_lowerCamelCase , help="The output directory where the model predictions and checkpoints will be written." , )
parser.add_argument("--train_dataset" , type=_lowerCamelCase , default="" )
parser.add_argument("--eval_dataset" , type=_lowerCamelCase , default="" )
parser.add_argument("--seed" , type=_lowerCamelCase , default=42 )
parser.add_argument("--num_train_epochs" , type=_lowerCamelCase , default=3 )
parser.add_argument("--train_batch_size" , type=_lowerCamelCase , default=8 )
parser.add_argument("--eval_batch_size" , type=_lowerCamelCase , default=16 )
parser.add_argument("--adam_epsilon" , default=1e-8 , type=_lowerCamelCase , help="Epsilon for Adam optimizer." )
parser.add_argument("--max_grad_norm" , type=_lowerCamelCase , default=1 )
parser.add_argument(
"--max_steps" , default=-1 , type=_lowerCamelCase , help=(
"If > 0: set total number of training steps to perform. Override num_train_epochs."
) , )
parser.add_argument(
"--gradient_accumulation_steps" , type=_lowerCamelCase , default=1 , help="Number of updates steps to accumulate before performing a backward/update pass." , )
parser.add_argument("--learning_rate" , type=_lowerCamelCase , default=6.25e-5 )
parser.add_argument("--warmup_steps" , default=0 , type=_lowerCamelCase , help="Linear warmup over warmup_steps." )
parser.add_argument("--lr_schedule" , type=_lowerCamelCase , default="warmup_linear" )
parser.add_argument("--weight_decay" , type=_lowerCamelCase , default=0.01 )
parser.add_argument("--lm_coef" , type=_lowerCamelCase , default=0.9 )
parser.add_argument("--n_valid" , type=_lowerCamelCase , default=374 )
parser.add_argument("--server_ip" , type=_lowerCamelCase , default="" , help="Can be used for distant debugging." )
parser.add_argument("--server_port" , type=_lowerCamelCase , default="" , help="Can be used for distant debugging." )
A : List[Any] = parser.parse_args()
print(_lowerCamelCase )
if args.server_ip and args.server_port:
# Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
import ptvsd
print("Waiting for debugger attach" )
ptvsd.enable_attach(address=(args.server_ip, args.server_port) , redirect_output=_lowerCamelCase )
ptvsd.wait_for_attach()
random.seed(args.seed )
np.random.seed(args.seed )
torch.manual_seed(args.seed )
torch.cuda.manual_seed_all(args.seed )
A : Union[str, Any] = torch.device("cuda" if torch.cuda.is_available() else "cpu" )
A : List[Any] = torch.cuda.device_count()
logger.info("device: {}, n_gpu {}".format(_lowerCamelCase , _lowerCamelCase ) )
if not args.do_train and not args.do_eval:
raise ValueError("At least one of `do_train` or `do_eval` must be True." )
if not os.path.exists(args.output_dir ):
os.makedirs(args.output_dir )
# Load tokenizer and model
# This loading functions also add new tokens and embeddings called `special tokens`
# These new embeddings will be fine-tuned on the RocStories dataset
A : List[Any] = ["_start_", "_delimiter_", "_classify_"]
A : Dict = OpenAIGPTTokenizer.from_pretrained(args.model_name )
tokenizer.add_tokens(_lowerCamelCase )
A : str = tokenizer.convert_tokens_to_ids(_lowerCamelCase )
A : str = OpenAIGPTDoubleHeadsModel.from_pretrained(args.model_name )
model.resize_token_embeddings(len(_lowerCamelCase ) )
model.to(_lowerCamelCase )
# Load and encode the datasets
def tokenize_and_encode(_lowerCamelCase ):
if isinstance(_lowerCamelCase , _lowerCamelCase ):
return tokenizer.convert_tokens_to_ids(tokenizer.tokenize(_lowerCamelCase ) )
elif isinstance(_lowerCamelCase , _lowerCamelCase ):
return obj
return [tokenize_and_encode(_lowerCamelCase ) for o in obj]
logger.info("Encoding dataset..." )
A : List[str] = load_rocstories_dataset(args.train_dataset )
A : List[str] = load_rocstories_dataset(args.eval_dataset )
A : str = (train_dataset, eval_dataset)
A : Any = tokenize_and_encode(_lowerCamelCase )
# Compute the max input length for the Transformer
A : Any = model.config.n_positions // 2 - 2
A : List[str] = max(
len(story[:max_length] ) + max(len(conta[:max_length] ) , len(conta[:max_length] ) ) + 3
for dataset in encoded_datasets
for story, conta, conta, _ in dataset )
A : Dict = min(_lowerCamelCase , model.config.n_positions ) # Max size of input for the pre-trained model
    # Prepare inputs tensors and dataloaders
    tensor_datasets = pre_process_datasets(encoded_datasets, input_length, max_length, *special_tokens_ids)
    train_tensor_dataset, eval_tensor_dataset = tensor_datasets[0], tensor_datasets[1]

    train_data = TensorDataset(*train_tensor_dataset)
    train_sampler = RandomSampler(train_data)
    train_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=args.train_batch_size)

    eval_data = TensorDataset(*eval_tensor_dataset)
    eval_sampler = SequentialSampler(eval_data)
    eval_dataloader = DataLoader(eval_data, sampler=eval_sampler, batch_size=args.eval_batch_size)

    # Prepare optimizer
    if args.do_train:
        if args.max_steps > 0:
            t_total = args.max_steps
            args.num_train_epochs = args.max_steps // (len(train_dataloader) // args.gradient_accumulation_steps) + 1
        else:
            t_total = len(train_dataloader) // args.gradient_accumulation_steps * args.num_train_epochs

        param_optimizer = list(model.named_parameters())
        no_decay = ["bias", "LayerNorm.bias", "LayerNorm.weight"]
        optimizer_grouped_parameters = [
            {
                "params": [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)],
                "weight_decay": args.weight_decay,
            },
            {"params": [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], "weight_decay": 0.0},
        ]
        optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon)
        scheduler = get_linear_schedule_with_warmup(
            optimizer, num_warmup_steps=args.warmup_steps, num_training_steps=t_total
        )

    if args.do_train:
        nb_tr_steps, tr_loss, exp_average_loss = 0, 0, None
        model.train()
        for _ in trange(int(args.num_train_epochs), desc="Epoch"):
            tr_loss = 0
            nb_tr_steps = 0
            tqdm_bar = tqdm(train_dataloader, desc="Training")
            for step, batch in enumerate(tqdm_bar):
                batch = tuple(t.to(device) for t in batch)
                input_ids, mc_token_ids, lm_labels, mc_labels = batch
                losses = model(input_ids, mc_token_ids=mc_token_ids, lm_labels=lm_labels, mc_labels=mc_labels)
                loss = args.lm_coef * losses[0] + losses[1]
                loss.backward()
                optimizer.step()
                scheduler.step()
                optimizer.zero_grad()
                tr_loss += loss.item()
                # Exponential moving average of the loss, used only for the progress bar
                exp_average_loss = (
                    loss.item() if exp_average_loss is None else 0.7 * exp_average_loss + 0.3 * loss.item()
                )
                nb_tr_steps += 1
                tqdm_bar.desc = "Training loss: {:.2e} lr: {:.2e}".format(exp_average_loss, scheduler.get_lr()[0])
    # Save a trained model
    if args.do_train:
        # Save a trained model, configuration and tokenizer
        model_to_save = model.module if hasattr(model, "module") else model  # Only save the model itself

        # If we save using the predefined names, we can load using `from_pretrained`
        output_model_file = os.path.join(args.output_dir, WEIGHTS_NAME)
        output_config_file = os.path.join(args.output_dir, CONFIG_NAME)

        torch.save(model_to_save.state_dict(), output_model_file)
        model_to_save.config.to_json_file(output_config_file)
        tokenizer.save_vocabulary(args.output_dir)

        # Load a trained model and vocabulary that you have fine-tuned
        model = OpenAIGPTDoubleHeadsModel.from_pretrained(args.output_dir)
        tokenizer = OpenAIGPTTokenizer.from_pretrained(args.output_dir)
        model.to(device)

    if args.do_eval:
        model.eval()
        eval_loss, eval_accuracy = 0, 0
        nb_eval_steps, nb_eval_examples = 0, 0
        for batch in tqdm(eval_dataloader, desc="Evaluating"):
            batch = tuple(t.to(device) for t in batch)
            input_ids, mc_token_ids, lm_labels, mc_labels = batch
            with torch.no_grad():
                _, mc_loss, _, mc_logits = model(
                    input_ids, mc_token_ids=mc_token_ids, lm_labels=lm_labels, mc_labels=mc_labels
                )

            mc_logits = mc_logits.detach().cpu().numpy()
            mc_labels = mc_labels.to("cpu").numpy()
            tmp_eval_accuracy = accuracy(mc_logits, mc_labels)

            eval_loss += mc_loss.mean().item()
            eval_accuracy += tmp_eval_accuracy

            nb_eval_examples += input_ids.size(0)
            nb_eval_steps += 1

        eval_loss = eval_loss / nb_eval_steps
        eval_accuracy = eval_accuracy / nb_eval_examples
        train_loss = tr_loss / nb_tr_steps if args.do_train else None
        result = {"eval_loss": eval_loss, "eval_accuracy": eval_accuracy, "train_loss": train_loss}

        output_eval_file = os.path.join(args.output_dir, "eval_results.txt")
        with open(output_eval_file, "w") as writer:
            logger.info("***** Eval results *****")
            for key in sorted(result.keys()):
                logger.info("  %s = %s", key, str(result[key]))
                writer.write("%s = %s\n" % (key, str(result[key])))


if __name__ == "__main__":
    main()
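
# A hedged usage sketch (not from the original file): given the argparse flags
# referenced above (`--model_name`, `--do_train`, `--do_eval`, `--train_dataset`,
# `--eval_dataset`, `--output_dir`, ...), a typical invocation might look like:
#
#   python run_openai_gpt.py \
#       --model_name openai-gpt \
#       --do_train --do_eval \
#       --train_dataset cloze_test_val__spring2016.csv \
#       --eval_dataset cloze_test_test__spring2016.csv \
#       --output_dir ./rocstories_model
#
# The script name and dataset file names are assumptions for illustration only.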
"""simple docstring"""
import argparse
import requests
import torch
from PIL import Image
from transformers import SwinConfig, SwinForMaskedImageModeling, ViTImageProcessor
def _snake_case ( lowercase__ ):
_lowerCamelCase : Optional[int] = SwinConfig(image_size=192 )
if "base" in model_name:
_lowerCamelCase : Optional[int] = 6
_lowerCamelCase : int = 128
_lowerCamelCase : List[Any] = (2, 2, 18, 2)
_lowerCamelCase : Optional[int] = (4, 8, 16, 32)
elif "large" in model_name:
_lowerCamelCase : int = 12
_lowerCamelCase : str = 192
_lowerCamelCase : Tuple = (2, 2, 18, 2)
_lowerCamelCase : Optional[int] = (6, 12, 24, 48)
else:
raise ValueError('Model not supported, only supports base and large variants' )
_lowerCamelCase : List[str] = window_size
_lowerCamelCase : int = embed_dim
_lowerCamelCase : Optional[Any] = depths
_lowerCamelCase : Any = num_heads
return config
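
# Illustrative check (not part of the original script): for the base variant,
# get_swin_config("swin-base-simmim-window6-192") yields window_size=6,
# embed_dim=128, depths=(2, 2, 18, 2), num_heads=(4, 8, 16, 32) on top of a
# SwinConfig with image_size=192.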

def rename_key(name):
    if "encoder.mask_token" in name:
        name = name.replace("encoder.mask_token", "embeddings.mask_token")
    if "encoder.patch_embed.proj" in name:
        name = name.replace("encoder.patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "encoder.patch_embed.norm" in name:
        name = name.replace("encoder.patch_embed.norm", "embeddings.norm")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name:
        name = name.replace("attn", "attention.self")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")

    if name == "encoder.norm.weight":
        name = "layernorm.weight"
    if name == "encoder.norm.bias":
        name = "layernorm.bias"

    if "decoder" in name:
        pass
    else:
        name = "swin." + name

    return name
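
# Example mappings produced by rename_key (derived from the rules above):
#   "encoder.patch_embed.proj.weight" -> "swin.embeddings.patch_embeddings.projection.weight"
#   "encoder.layers.0.blocks.0.attn.proj.weight" -> "swin.encoder.layers.0.blocks.0.attention.output.dense.weight"
# Keys containing "decoder" skip the "swin." prefix.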

def convert_state_dict(orig_state_dict, model):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "attn_mask" in key:
            pass
        elif "qkv" in key:
            key_split = key.split(".")
            layer_num = int(key_split[2])
            block_num = int(key_split[4])
            dim = model.swin.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size

            if "weight" in key:
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.weight"
                ] = val[:dim, :]
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.weight"
                ] = val[dim : dim * 2, :]
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.weight"
                ] = val[-dim:, :]
            else:
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.bias"
                ] = val[:dim]
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.bias"
                ] = val[dim : dim * 2]
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.bias"
                ] = val[-dim:]
        else:
            orig_state_dict[rename_key(key)] = val

    return orig_state_dict
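
# The split above assumes the original checkpoint stores query/key/value as one
# fused "qkv" matrix whose first `dim` rows are Q, the next `dim` rows are K,
# and the last `dim` rows are V. A tiny sanity check of that slicing
# (illustrative only, not part of the conversion):
#
#   fused = torch.arange(12).reshape(6, 2)  # pretend dim == 2
#   q, k, v = fused[:2, :], fused[2:4, :], fused[-2:, :]
#   assert torch.equal(torch.cat([q, k, v]), fused)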

def convert_swin_checkpoint(model_name, checkpoint_path, pytorch_dump_folder_path, push_to_hub):
    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]

    config = get_swin_config(model_name)
    model = SwinForMaskedImageModeling(config)
    model.eval()

    new_state_dict = convert_state_dict(state_dict, model)
    model.load_state_dict(new_state_dict)

    url = "http://images.cocodataset.org/val2017/000000039769.jpg"

    image_processor = ViTImageProcessor(size={"height": 192, "width": 192})
    image = Image.open(requests.get(url, stream=True).raw)
    inputs = image_processor(images=image, return_tensors="pt")

    with torch.no_grad():
        outputs = model(**inputs)

    print(outputs.keys())
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)

        print(f"Saving image processor to {pytorch_dump_folder_path}")
        image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print(f"Pushing model and image processor for {model_name} to hub")
        model.push_to_hub(f"microsoft/{model_name}")
        image_processor.push_to_hub(f"microsoft/{model_name}")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--model_name",
        default="swin-base-simmim-window6-192",
        type=str,
        choices=["swin-base-simmim-window6-192", "swin-large-simmim-window12-192"],
        help="Name of the Swin SimMIM model you'd like to convert.",
    )
    parser.add_argument(
        "--checkpoint_path",
        default="/Users/nielsrogge/Documents/SwinSimMIM/simmim_pretrain__swin_base__img192_window6__100ep.pth",
        type=str,
        help="Path to the original PyTorch checkpoint (.pth file).",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )
    parser.add_argument(
        "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
    )

    args = parser.parse_args()
    convert_swin_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
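
# A hedged invocation sketch (the script file name is assumed for illustration):
#
#   python convert_swin_simmim_to_pytorch.py \
#       --model_name swin-base-simmim-window6-192 \
#       --checkpoint_path ./simmim_pretrain__swin_base__img192_window6__100ep.pth \
#       --pytorch_dump_folder_path ./swin-base-simmim-converted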
import inspect
import re

from transformers.utils import direct_transformers_import


# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
PATH_TO_TRANSFORMERS = "src/transformers"

# This is to make sure the transformers module imported is the one in the repo.
transformers = direct_transformers_import(PATH_TO_TRANSFORMERS)

CONFIG_MAPPING = transformers.models.auto.configuration_auto.CONFIG_MAPPING

# Regex pattern used to find the checkpoint mentioned in the docstring of `config_class`.
# For example, `[bert-base-uncased](https://huggingface.co/bert-base-uncased)`
_re_checkpoint = re.compile(r"\[(.+?)\]\((https://huggingface\.co/.+?)\)")

CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK = {
    "DecisionTransformerConfig",
    "EncoderDecoderConfig",
    "MusicgenConfig",
    "RagConfig",
    "SpeechEncoderDecoderConfig",
    "TimmBackboneConfig",
    "VisionEncoderDecoderConfig",
    "VisionTextDualEncoderConfig",
    "LlamaConfig",
}


def get_checkpoint_from_config_class(config_class):
    checkpoint = None

    # source code of `config_class`
    config_source = inspect.getsource(config_class)
    checkpoints = _re_checkpoint.findall(config_source)

    # Each `checkpoint` is a tuple of a checkpoint name and a checkpoint link.
    # For example, `('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')`
    for ckpt_name, ckpt_link in checkpoints:
        # allow the link to end with `/`
        if ckpt_link.endswith("/"):
            ckpt_link = ckpt_link[:-1]

        # verify the checkpoint name corresponds to the checkpoint link
        ckpt_link_from_name = f"https://huggingface.co/{ckpt_name}"
        if ckpt_link == ckpt_link_from_name:
            checkpoint = ckpt_name
            break

    return checkpoint
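
# Example (matches the pattern documented above):
#
#   _re_checkpoint.findall("[bert-base-uncased](https://huggingface.co/bert-base-uncased)")
#   # -> [("bert-base-uncased", "https://huggingface.co/bert-base-uncased")]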

def check_config_docstrings_have_checkpoints():
    configs_without_checkpoint = []

    for config_class in list(CONFIG_MAPPING.values()):
        # Skip deprecated models
        if "models.deprecated" in config_class.__module__:
            continue
        checkpoint = get_checkpoint_from_config_class(config_class)

        name = config_class.__name__
        if checkpoint is None and name not in CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK:
            configs_without_checkpoint.append(name)

    if len(configs_without_checkpoint) > 0:
        message = "\n".join(sorted(configs_without_checkpoint))
        raise ValueError(f"The following configurations don't contain any valid checkpoint:\n{message}")


if __name__ == "__main__":
    check_config_docstrings_have_checkpoints()
def prefix_function(input_string: str) -> list:
    """
    Compute, for each index i, the length of the longest proper prefix of
    input_string[0...i] that is also a suffix of it (the KMP prefix function).

    >>> prefix_function("aabcdaabc")
    [0, 1, 0, 0, 0, 1, 2, 3, 4]
    """
    prefix_result = [0] * len(input_string)
    for i in range(1, len(input_string)):
        # use last results for better performance - dynamic programming
        j = prefix_result[i - 1]
        while j > 0 and input_string[i] != input_string[j]:
            j = prefix_result[j - 1]
        if input_string[i] == input_string[j]:
            j += 1
        prefix_result[i] = j
    return prefix_result


def longest_prefix(input_string: str) -> int:
    """
    Return the length of the longest proper prefix that is also a suffix.

    >>> longest_prefix("aabcdaabc")
    4
    """
    return max(prefix_function(input_string))
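
# A hedged application sketch (not part of the original file): classic KMP
# substring search built on prefix_function, run over "pattern + sep + text".
# The "\0" separator is assumed not to occur in either string.
def kmp_find(pattern: str, text: str) -> int:
    """
    Return the index of the first occurrence of `pattern` in `text`, or -1.

    >>> kmp_find("abc", "zzabczz")
    2
    >>> kmp_find("abc", "zzazbc")
    -1
    """
    if not pattern:
        return 0
    combined = pattern + "\0" + text
    pi = prefix_function(combined)
    for i in range(len(pattern) + 1, len(combined)):
        if pi[i] == len(pattern):
            # match ends at combined index i; convert to a start index in text
            return i - 2 * len(pattern)
    return -1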

if __name__ == "__main__":
    import doctest

    doctest.testmod()
import unittest

import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel

from diffusers import DDIMScheduler, LDMPipeline, UNet2DModel, VQModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device


enable_full_determinism()

class LDMPipelineFastTests(unittest.TestCase):
    @property
    def dummy_uncond_unet(self):
        torch.manual_seed(0)
        model = UNet2DModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=3, out_channels=3,
            down_block_types=("DownBlock2D", "AttnDownBlock2D"), up_block_types=("AttnUpBlock2D", "UpBlock2D"),
        )
        return model

    @property
    def dummy_vq_model(self):
        torch.manual_seed(0)
        model = VQModel(
            block_out_channels=[32, 64], in_channels=3, out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=3,
        )
        return model

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05,
            num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000,
        )
        return CLIPTextModel(config)

    def test_inference_uncond(self):
        unet = self.dummy_uncond_unet
        scheduler = DDIMScheduler()
        vae = self.dummy_vq_model

        ldm = LDMPipeline(unet=unet, vqvae=vae, scheduler=scheduler)
        ldm.to(torch_device)
        ldm.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = ldm(generator=generator, num_inference_steps=2, output_type="numpy").images

        generator = torch.manual_seed(0)
        image_from_tuple = ldm(generator=generator, num_inference_steps=2, output_type="numpy", return_dict=False)[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.8512, 0.818, 0.6411, 0.6808, 0.4465, 0.5618, 0.46, 0.6231, 0.5172])
        tolerance = 1e-2 if torch_device != "mps" else 3e-2

        assert np.abs(image_slice.flatten() - expected_slice).max() < tolerance
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < tolerance

@slow
@require_torch
class LDMPipelineIntegrationTests(unittest.TestCase):
    def test_inference_uncond(self):
        ldm = LDMPipeline.from_pretrained("CompVis/ldm-celebahq-256")
        ldm.to(torch_device)
        ldm.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = ldm(generator=generator, num_inference_steps=5, output_type="numpy").images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.4399, 0.44975, 0.46825, 0.474, 0.4359, 0.4581, 0.45095, 0.4341, 0.4447])
        tolerance = 1e-2 if torch_device != "mps" else 3e-2

        assert np.abs(image_slice.flatten() - expected_slice).max() < tolerance
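
# To run these tests (file path assumed for illustration):
#
#   pytest tests/pipelines/latent_diffusion/test_latent_diffusion_uncond.py
#
# Tests decorated with @slow are skipped unless RUN_SLOW=1 is set in the
# environment, per the diffusers testing conventions.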