import argparse
from pathlib import Path
import torch
from packaging import version
from torch.onnx import export
from diffusers import AutoencoderKL
is_torch_less_than_1_11 = version.parse(version.parse(torch.__version__).base_version) < version.parse("1.11")


def onnx_export(
    model,
    model_args: tuple,
    output_path: Path,
    ordered_input_names,
    output_names,
    dynamic_axes,
    opset,
    use_external_data_format=False,
) -> None:
    output_path.parent.mkdir(parents=True, exist_ok=True)
    # PyTorch deprecated the `enable_onnx_checker` and `use_external_data_format` arguments in v1.11,
    # so we check the torch version for backwards compatibility
    if is_torch_less_than_1_11:
        export(
            model,
            model_args,
            f=output_path.as_posix(),
            input_names=ordered_input_names,
            output_names=output_names,
            dynamic_axes=dynamic_axes,
            do_constant_folding=True,
            use_external_data_format=use_external_data_format,
            enable_onnx_checker=True,
            opset_version=opset,
        )
    else:
        export(
            model,
            model_args,
            f=output_path.as_posix(),
            input_names=ordered_input_names,
            output_names=output_names,
            dynamic_axes=dynamic_axes,
            do_constant_folding=True,
            opset_version=opset,
        )

@torch.no_grad()
def convert_models(model_path: str, output_path: str, opset: int, fp16: bool = False) -> None:
    dtype = torch.float16 if fp16 else torch.float32
    if fp16 and torch.cuda.is_available():
        device = "cuda"
    elif fp16 and not torch.cuda.is_available():
        raise ValueError("`float16` model export is only supported on GPUs with CUDA")
    else:
        device = "cpu"
    output_path = Path(output_path)

    # VAE DECODER
    vae_decoder = AutoencoderKL.from_pretrained(model_path + "/vae")
    vae_latent_channels = vae_decoder.config.latent_channels
    # forward only through the decoder part
    vae_decoder.forward = vae_decoder.decode
    onnx_export(
        vae_decoder,
        model_args=(
            torch.randn(1, vae_latent_channels, 25, 25).to(device=device, dtype=dtype),
            False,
        ),
        output_path=output_path / "vae_decoder" / "model.onnx",
        ordered_input_names=["latent_sample", "return_dict"],
        output_names=["sample"],
        dynamic_axes={
            "latent_sample": {0: "batch", 1: "channels", 2: "height", 3: "width"},
        },
        opset=opset,
    )
    del vae_decoder

if __name__ == "__main__":
UpperCAmelCase__ = argparse.ArgumentParser()
parser.add_argument(
"--model_path",
type=str,
required=True,
help="Path to the `diffusers` checkpoint to convert (either a local directory or on the Hub).",
)
parser.add_argument("--output_path", type=str, required=True, help="Path to the output model.")
parser.add_argument(
"--opset",
default=14,
type=int,
help="The version of the ONNX operator set to use.",
)
parser.add_argument("--fp16", action="store_true", default=False, help="Export the models in `float16` mode")
UpperCAmelCase__ = parser.parse_args()
print(args.output_path)
convert_models(args.model_path, args.output_path, args.opset, args.fpaa)
print("SD: Done: ONNX")
'''Deprecated GLUE/XNLI metric helpers; metrics should now be handled with the 🤗 Evaluate library.'''
import warnings
from ...utils import is_sklearn_available, requires_backends
if is_sklearn_available():
    from scipy.stats import pearsonr, spearmanr
    from sklearn.metrics import f1_score, matthews_corrcoef
DEPRECATION_WARNING = (
    "This metric will be removed from the library soon, metrics should be handled with the 🤗 Evaluate "
    "library. You can have a look at this example script for pointers: "
    "https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py"
)
def simple_accuracy(preds, labels):
    warnings.warn(DEPRECATION_WARNING, FutureWarning)
    requires_backends(simple_accuracy, "sklearn")
    return (preds == labels).mean()


def acc_and_f1(preds, labels):
    warnings.warn(DEPRECATION_WARNING, FutureWarning)
    requires_backends(acc_and_f1, "sklearn")
    acc = simple_accuracy(preds, labels)
    f1 = f1_score(y_true=labels, y_pred=preds)
    return {
        "acc": acc,
        "f1": f1,
        "acc_and_f1": (acc + f1) / 2,
    }


def pearson_and_spearman(preds, labels):
    warnings.warn(DEPRECATION_WARNING, FutureWarning)
    requires_backends(pearson_and_spearman, "sklearn")
    pearson_corr = pearsonr(preds, labels)[0]
    spearman_corr = spearmanr(preds, labels)[0]
    return {
        "pearson": pearson_corr,
        "spearmanr": spearman_corr,
        "corr": (pearson_corr + spearman_corr) / 2,
    }


def glue_compute_metrics(task_name, preds, labels):
    warnings.warn(DEPRECATION_WARNING, FutureWarning)
    requires_backends(glue_compute_metrics, "sklearn")
    assert len(preds) == len(labels), f"Predictions and labels have mismatched lengths {len(preds)} and {len(labels)}"
    if task_name == "cola":
        return {"mcc": matthews_corrcoef(labels, preds)}
    elif task_name == "sst-2":
        return {"acc": simple_accuracy(preds, labels)}
    elif task_name == "mrpc":
        return acc_and_f1(preds, labels)
    elif task_name == "sts-b":
        return pearson_and_spearman(preds, labels)
    elif task_name == "qqp":
        return acc_and_f1(preds, labels)
    elif task_name == "mnli":
        return {"mnli/acc": simple_accuracy(preds, labels)}
    elif task_name == "mnli-mm":
        return {"mnli-mm/acc": simple_accuracy(preds, labels)}
    elif task_name == "qnli":
        return {"acc": simple_accuracy(preds, labels)}
    elif task_name == "rte":
        return {"acc": simple_accuracy(preds, labels)}
    elif task_name == "wnli":
        return {"acc": simple_accuracy(preds, labels)}
    elif task_name == "hans":
        return {"acc": simple_accuracy(preds, labels)}
    else:
        raise KeyError(task_name)


def xnli_compute_metrics(task_name, preds, labels):
    warnings.warn(DEPRECATION_WARNING, FutureWarning)
    requires_backends(xnli_compute_metrics, "sklearn")
    if len(preds) != len(labels):
        raise ValueError(f"Predictions and labels have mismatched lengths {len(preds)} and {len(labels)}")
    if task_name == "xnli":
        return {"acc": simple_accuracy(preds, labels)}
    else:
        raise KeyError(task_name)
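
# Minimal usage sketch for the helpers above (not part of the original module): predictions
# and labels are 1-D integer numpy arrays, and the task names mirror the branches of
# `glue_compute_metrics`.
import numpy as np

preds = np.array([1, 0, 1, 1])
labels = np.array([1, 0, 0, 1])
print(glue_compute_metrics("cola", preds, labels))  # {"mcc": ...}
print(glue_compute_metrics("mrpc", preds, labels))  # {"acc": ..., "f1": ..., "acc_and_f1": ...}
print(xnli_compute_metrics("xnli", preds, labels))  # {"acc": ...}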
'''Convert original Swin2SR checkpoints to the Hugging Face Transformers format.'''
import argparse
import requests
import torch
from PIL import Image
from torchvision.transforms import Compose, Normalize, Resize, ToTensor
from transformers import Swin2SRConfig, Swin2SRForImageSuperResolution, Swin2SRImageProcessor
def get_config(checkpoint_url):
    config = Swin2SRConfig()

    if "Swin2SR_ClassicalSR_X4_64" in checkpoint_url:
        config.upscale = 4
    elif "Swin2SR_CompressedSR_X4_48" in checkpoint_url:
        config.upscale = 4
        config.image_size = 48
        config.upsampler = "pixelshuffle_aux"
    elif "Swin2SR_Lightweight_X2_64" in checkpoint_url:
        config.depths = [6, 6, 6, 6]
        config.embed_dim = 60
        config.num_heads = [6, 6, 6, 6]
        config.upsampler = "pixelshuffledirect"
    elif "Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR" in checkpoint_url:
        config.upscale = 4
        config.upsampler = "nearest+conv"
    elif "Swin2SR_Jpeg_dynamic" in checkpoint_url:
        config.num_channels = 1
        config.upscale = 1
        config.image_size = 126
        config.window_size = 7
        config.img_range = 255.0
        config.upsampler = ""

    return config
def rename_key(name, config):
    if "patch_embed.proj" in name and "layers" not in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "embeddings.patch_embeddings.layernorm")
    if "layers" in name:
        name = name.replace("layers", "encoder.stages")
    if "residual_group.blocks" in name:
        name = name.replace("residual_group.blocks", "layers")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name:
        name = name.replace("attn", "attention.self")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    if "q_bias" in name:
        name = name.replace("q_bias", "query.bias")
    if "k_bias" in name:
        name = name.replace("k_bias", "key.bias")
    if "v_bias" in name:
        name = name.replace("v_bias", "value.bias")
    if "cpb_mlp" in name:
        name = name.replace("cpb_mlp", "continuous_position_bias_mlp")
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "patch_embed.projection")
    if name == "norm.weight":
        name = "layernorm.weight"
    if name == "norm.bias":
        name = "layernorm.bias"
    if "conv_first" in name:
        name = name.replace("conv_first", "first_convolution")
    if (
        "upsample" in name
        or "conv_before_upsample" in name
        or "conv_bicubic" in name
        or "conv_up" in name
        or "conv_hr" in name
        or "conv_last" in name
        or "aux" in name
    ):
        # heads
        if "conv_last" in name:
            name = name.replace("conv_last", "final_convolution")
        if config.upsampler in ["pixelshuffle", "pixelshuffle_aux", "nearest+conv"]:
            if "conv_before_upsample.0" in name:
                name = name.replace("conv_before_upsample.0", "conv_before_upsample")
            if "upsample.0" in name:
                name = name.replace("upsample.0", "upsample.convolution_0")
            if "upsample.2" in name:
                name = name.replace("upsample.2", "upsample.convolution_1")
            name = "upsample." + name
        elif config.upsampler == "pixelshuffledirect":
            name = name.replace("upsample.0.weight", "upsample.conv.weight")
            name = name.replace("upsample.0.bias", "upsample.conv.bias")
        else:
            pass
    else:
        name = "swin2sr." + name

    return name
def convert_state_dict(orig_state_dict, config):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "qkv" in key:
            key_split = key.split(".")
            stage_num = int(key_split[1])
            block_num = int(key_split[4])
            dim = config.embed_dim

            if "weight" in key:
                orig_state_dict[
                    f"swin2sr.encoder.stages.{stage_num}.layers.{block_num}.attention.self.query.weight"
                ] = val[:dim, :]
                orig_state_dict[
                    f"swin2sr.encoder.stages.{stage_num}.layers.{block_num}.attention.self.key.weight"
                ] = val[dim : dim * 2, :]
                orig_state_dict[
                    f"swin2sr.encoder.stages.{stage_num}.layers.{block_num}.attention.self.value.weight"
                ] = val[-dim:, :]
            else:
                orig_state_dict[
                    f"swin2sr.encoder.stages.{stage_num}.layers.{block_num}.attention.self.query.bias"
                ] = val[:dim]
                orig_state_dict[
                    f"swin2sr.encoder.stages.{stage_num}.layers.{block_num}.attention.self.key.bias"
                ] = val[dim : dim * 2]
                orig_state_dict[
                    f"swin2sr.encoder.stages.{stage_num}.layers.{block_num}.attention.self.value.bias"
                ] = val[-dim:]
            pass
        else:
            orig_state_dict[rename_key(key, config)] = val

    return orig_state_dict
def convert_swin2sr_checkpoint(checkpoint_url, pytorch_dump_folder_path, push_to_hub):
    config = get_config(checkpoint_url)
    model = Swin2SRForImageSuperResolution(config)
    model.eval()

    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")
    new_state_dict = convert_state_dict(state_dict, config)
    missing_keys, unexpected_keys = model.load_state_dict(new_state_dict, strict=False)

    if len(missing_keys) > 0:
        raise ValueError("Missing keys when converting: {}".format(missing_keys))
    for key in unexpected_keys:
        if not ("relative_position_index" in key or "relative_coords_table" in key or "self_mask" in key):
            raise ValueError(f"Unexpected key {key} in state_dict")

    # verify values
    url = "https://github.com/mv-lab/swin2sr/blob/main/testsets/real-inputs/shanghai.jpg?raw=true"
    image = Image.open(requests.get(url, stream=True).raw).convert("RGB")
    processor = Swin2SRImageProcessor()
    # pixel_values = processor(image, return_tensors="pt").pixel_values

    image_size = 126 if "Jpeg" in checkpoint_url else 256
    transforms = Compose(
        [
            Resize((image_size, image_size)),
            ToTensor(),
            Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
        ]
    )
    pixel_values = transforms(image).unsqueeze(0)

    if config.num_channels == 1:
        pixel_values = pixel_values[:, 0, :, :].unsqueeze(1)

    outputs = model(pixel_values)

    # assert values
    if "Swin2SR_ClassicalSR_X2_64" in checkpoint_url:
        expected_shape = torch.Size([1, 3, 512, 512])
        expected_slice = torch.tensor(
            [[-0.7087, -0.7138, -0.6721], [-0.8340, -0.8095, -0.7298], [-0.9149, -0.8414, -0.7940]]
        )
    elif "Swin2SR_ClassicalSR_X4_64" in checkpoint_url:
        expected_shape = torch.Size([1, 3, 1024, 1024])
        expected_slice = torch.tensor(
            [[-0.7775, -0.8105, -0.8933], [-0.7764, -0.8356, -0.9225], [-0.7976, -0.8686, -0.9579]]
        )
    elif "Swin2SR_CompressedSR_X4_48" in checkpoint_url:
        # TODO values didn't match exactly here
        expected_shape = torch.Size([1, 3, 1024, 1024])
        expected_slice = torch.tensor(
            [[-0.8035, -0.7504, -0.7491], [-0.8538, -0.8124, -0.7782], [-0.8804, -0.8651, -0.8493]]
        )
    elif "Swin2SR_Lightweight_X2_64" in checkpoint_url:
        expected_shape = torch.Size([1, 3, 512, 512])
        expected_slice = torch.tensor(
            [[-0.7669, -0.8662, -0.8767], [-0.8810, -0.9962, -0.9820], [-0.9340, -1.0322, -1.1149]]
        )
    elif "Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR" in checkpoint_url:
        expected_shape = torch.Size([1, 3, 1024, 1024])
        expected_slice = torch.tensor(
            [[-0.5238, -0.5557, -0.6321], [-0.6016, -0.5903, -0.6391], [-0.6244, -0.6334, -0.6889]]
        )

    assert (
        outputs.reconstruction.shape == expected_shape
    ), f"Shape of reconstruction should be {expected_shape}, but is {outputs.reconstruction.shape}"
    assert torch.allclose(outputs.reconstruction[0, 0, :3, :3], expected_slice, atol=1e-3)
    print("Looks ok!")

    url_to_name = {
        "https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth": (
            "swin2SR-classical-sr-x2-64"
        ),
        "https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X4_64.pth": (
            "swin2SR-classical-sr-x4-64"
        ),
        "https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_CompressedSR_X4_48.pth": (
            "swin2SR-compressed-sr-x4-48"
        ),
        "https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_Lightweight_X2_64.pth": (
            "swin2SR-lightweight-x2-64"
        ),
        "https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR.pth": (
            "swin2SR-realworld-sr-x4-64-bsrgan-psnr"
        ),
    }
    model_name = url_to_name[checkpoint_url]

    if pytorch_dump_folder_path is not None:
        print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        print(f"Saving image processor to {pytorch_dump_folder_path}")
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        model.push_to_hub(f"caidas/{model_name}")
        processor.push_to_hub(f"caidas/{model_name}")
if __name__ == "__main__":
SCREAMING_SNAKE_CASE_: Any =argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--checkpoint_url',
default='https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth',
type=str,
help='URL of the original Swin2SR checkpoint you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument('--push_to_hub', action='store_true', help='Whether to push the converted model to the hub.')
SCREAMING_SNAKE_CASE_: List[Any] =parser.parse_args()
convert_swinasr_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
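
# Example invocation (a sketch; the script filename is hypothetical, the URL is the
# argparse default above):
#
#   python convert_swin2sr_original_to_pytorch.py \
#       --checkpoint_url https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth \
#       --pytorch_dump_folder_path ./swin2SR-classical-sr-x2-64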
'''Convert TAPAS checkpoints from the original TensorFlow repository.'''
import argparse
from transformers import (
TapasConfig,
TapasForMaskedLM,
TapasForQuestionAnswering,
TapasForSequenceClassification,
TapasModel,
TapasTokenizer,
load_tf_weights_in_tapas,
)
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(
    task, reset_position_index_per_cell, tf_checkpoint_path, tapas_config_file, pytorch_dump_path
):
    config = TapasConfig.from_json_file(tapas_config_file)
    # set absolute/relative position embeddings parameter
    config.reset_position_index_per_cell = reset_position_index_per_cell

    # set remaining parameters of TapasConfig as well as the model based on the task
    if task == "SQA":
        model = TapasForQuestionAnswering(config=config)
    elif task == "WTQ":
        # run_task_main.py hparams
        config.num_aggregation_labels = 4
        config.use_answer_as_supervision = True
        # hparam_utils.py hparams
        config.answer_loss_cutoff = 0.664694
        config.cell_selection_preference = 0.207951
        config.huber_loss_delta = 0.121194
        config.init_cell_selection_weights_to_zero = True
        config.select_one_column = True
        config.allow_empty_column_selection = False
        config.temperature = 0.0352513
        model = TapasForQuestionAnswering(config=config)
    elif task == "WIKISQL_SUPERVISED":
        # run_task_main.py hparams
        config.num_aggregation_labels = 4
        config.use_answer_as_supervision = False
        # hparam_utils.py hparams
        config.answer_loss_cutoff = 36.4519
        config.cell_selection_preference = 0.903421
        config.huber_loss_delta = 222.088
        config.init_cell_selection_weights_to_zero = True
        config.select_one_column = True
        config.allow_empty_column_selection = True
        config.temperature = 0.763141
        model = TapasForQuestionAnswering(config=config)
    elif task == "TABFACT":
        model = TapasForSequenceClassification(config=config)
    elif task == "MLM":
        model = TapasForMaskedLM(config=config)
    elif task == "INTERMEDIATE_PRETRAINING":
        model = TapasModel(config=config)
    else:
        raise ValueError(f"Task {task} not supported.")

    print(f"Building PyTorch model from configuration: {config}")
    # Load weights from tf checkpoint
    load_tf_weights_in_tapas(model, config, tf_checkpoint_path)

    # Save pytorch-model (weights and configuration)
    print(f"Save PyTorch model to {pytorch_dump_path}")
    model.save_pretrained(pytorch_dump_path)

    # Save tokenizer files
    print(f"Save tokenizer files to {pytorch_dump_path}")
    tokenizer = TapasTokenizer(vocab_file=tf_checkpoint_path[:-10] + "vocab.txt", model_max_length=512)
    tokenizer.save_pretrained(pytorch_dump_path)

    print("Used relative position embeddings:", model.config.reset_position_index_per_cell)
if __name__ == "__main__":
__SCREAMING_SNAKE_CASE :List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--task''', default='''SQA''', type=str, help='''Model task for which to convert a checkpoint. Defaults to SQA.'''
)
parser.add_argument(
'''--reset_position_index_per_cell''',
default=False,
action='''store_true''',
help='''Whether to use relative position embeddings or not. Defaults to True.''',
)
parser.add_argument(
'''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--tapas_config_file''',
default=None,
type=str,
required=True,
help=(
'''The config json file corresponding to the pre-trained TAPAS model. \n'''
'''This specifies the model architecture.'''
),
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
__SCREAMING_SNAKE_CASE :List[str] = parser.parse_args()
convert_tf_checkpoint_to_pytorch(
args.task,
args.reset_position_index_per_cell,
args.tf_checkpoint_path,
args.tapas_config_file,
args.pytorch_dump_path,
)
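
# Once converted, the dump directory can be reloaded with the standard `from_pretrained`
# API — a sketch assuming the script above was run with --task SQA and a hypothetical
# --pytorch_dump_path of ./tapas-sqa:
from transformers import TapasForQuestionAnswering, TapasTokenizer

model = TapasForQuestionAnswering.from_pretrained("./tapas-sqa")
tokenizer = TapasTokenizer.from_pretrained("./tapas-sqa")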
'''Shortest-job-first scheduling: compute per-process waiting and turnaround times.'''
from __future__ import annotations
from statistics import mean
def calculate_waitingtime(arrival_time: list[int], burst_time: list[int], no_of_processes: int) -> list[int]:
    """Calculate the waiting time of each process."""
    remaining_time = [0] * no_of_processes
    waiting_time = [0] * no_of_processes
    # Initialize remaining_time from burst_time.
    for i in range(no_of_processes):
        remaining_time[i] = burst_time[i]

    ready_process: list[int] = []
    completed = 0
    total_time = 0
    # While processes are not completed: every process whose arrival time has passed
    # and which still has remaining execution time is put into ready_process, and the
    # shortest process in ready_process, target_process, is executed to completion.
    while completed != no_of_processes:
        ready_process = []
        target_process = -1

        for i in range(no_of_processes):
            if (arrival_time[i] <= total_time) and (remaining_time[i] > 0):
                ready_process.append(i)

        if len(ready_process) > 0:
            target_process = ready_process[0]
            for i in ready_process:
                if remaining_time[i] < remaining_time[target_process]:
                    target_process = i
            total_time += burst_time[target_process]
            completed += 1
            remaining_time[target_process] = 0
            waiting_time[target_process] = (
                total_time - arrival_time[target_process] - burst_time[target_process]
            )
        else:
            total_time += 1
    return waiting_time


def calculate_turnaroundtime(burst_time: list[int], no_of_processes: int, waiting_time: list[int]) -> list[int]:
    """Turnaround time of each process is its burst time plus its waiting time."""
    turn_around_time = [0] * no_of_processes
    for i in range(no_of_processes):
        turn_around_time[i] = burst_time[i] + waiting_time[i]
    return turn_around_time
if __name__ == "__main__":
print('[TEST CASE 01]')
lowerCamelCase : Union[str, Any] = 4
lowerCamelCase : Optional[Any] = [2, 5, 3, 7]
lowerCamelCase : str = [0, 0, 0, 0]
lowerCamelCase : Any = calculate_waitingtime(arrival_time, burst_time, no_of_processes)
lowerCamelCase : Optional[int] = calculate_turnaroundtime(
burst_time, no_of_processes, waiting_time
)
# Printing the Result
print('PID\tBurst Time\tArrival Time\tWaiting Time\tTurnaround Time')
for i, process_id in enumerate(list(range(1, 5))):
print(
f"""{process_id}\t{burst_time[i]}\t\t\t{arrival_time[i]}\t\t\t\t"""
f"""{waiting_time[i]}\t\t\t\t{turn_around_time[i]}"""
)
print(f"""\nAverage waiting time = {mean(waiting_time):.5f}""")
print(f"""Average turnaround time = {mean(turn_around_time):.5f}""")
'''Close or mark as stale inactive issues on the huggingface/accelerate repository.'''
import os
from datetime import datetime as dt
from github import Github
LABELS_TO_EXEMPT = [
    "good first issue",
    "feature request",
    "wip",
]
def main():
    g = Github(os.environ["GITHUB_TOKEN"])
    repo = g.get_repo("huggingface/accelerate")
    open_issues = repo.get_issues(state="open")

    for issue in open_issues:
        comments = sorted([comment for comment in issue.get_comments()], key=lambda i: i.created_at, reverse=True)
        last_comment = comments[0] if len(comments) > 0 else None
        current_time = dt.utcnow()
        days_since_updated = (current_time - issue.updated_at).days
        days_since_creation = (current_time - issue.created_at).days
        if (
            last_comment is not None
            and last_comment.user.login == "github-actions[bot]"
            and days_since_updated > 7
            and days_since_creation >= 30
            and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
        ):
            # Close issue since it has been 7 days of inactivity since bot mention.
            issue.edit(state="closed")
        elif (
            days_since_updated > 23
            and days_since_creation >= 30
            and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
        ):
            # Add stale comment
            issue.create_comment(
                "This issue has been automatically marked as stale because it has not had "
                "recent activity. If you think this still needs to be addressed "
                "please comment on this thread.\n\nPlease note that issues that do not follow the "
                "[contributing guidelines](https://github.com/huggingface/accelerate/blob/main/CONTRIBUTING.md) "
                "are likely to be ignored."
            )


if __name__ == "__main__":
    main()
'''Maximum sum of k consecutive elements of an array, via a sliding window.'''
from __future__ import annotations
def max_sum_in_array(array: list[int], k: int) -> int:
    """Return the maximum sum of any k consecutive elements of `array`."""
    if len(array) < k or k < 0:
        raise ValueError("Invalid Input")
    max_sum = current_sum = sum(array[:k])
    for i in range(len(array) - k):
        current_sum = current_sum - array[i] + array[i + k]
        max_sum = max(max_sum, current_sum)
    return max_sum
if __name__ == "__main__":
from doctest import testmod
from random import randint
testmod()
lowercase : int = [randint(-10_00, 10_00) for i in range(1_00)]
lowercase : List[str] = randint(0, 1_10)
print(f'''The maximum sum of {k} consecutive elements is {max_sum_in_array(array,k)}''')
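
# Worked example: for array = [1, 4, 2, 10, 2, 3, 1, 0, 20] and k = 4 the window sums
# are 17, 18, 17, 16, 6, 24, so the call below returns 24.
assert max_sum_in_array([1, 4, 2, 10, 2, 3, 1, 0, 20], 4) == 24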
'''Tests for `datasets` DatasetInfo and DatasetInfosDict serialization.'''
import os
import pytest
import yaml
from datasets.features.features import Features, Value
from datasets.info import DatasetInfo, DatasetInfosDict
@pytest.mark.parametrize(
    "files",
    [
        ["full:README.md", "dataset_infos.json"],
        ["empty:README.md", "dataset_infos.json"],
        ["dataset_infos.json"],
        ["full:README.md"],
    ],
)
def test_from_dir(files, tmp_path_factory):
    dataset_infos_dir = tmp_path_factory.mktemp("dset_infos_dir")
    if "full:README.md" in files:
        with open(dataset_infos_dir / "README.md", "w") as f:
            f.write("---\ndataset_info:\n  dataset_size: 42\n---")
    if "empty:README.md" in files:
        with open(dataset_infos_dir / "README.md", "w") as f:
            f.write("")
    # we want to support dataset_infos.json for backward compatibility
    if "dataset_infos.json" in files:
        with open(dataset_infos_dir / "dataset_infos.json", "w") as f:
            f.write('{"default": {"dataset_size": 42}}')
    dataset_infos = DatasetInfosDict.from_directory(dataset_infos_dir)
    assert dataset_infos
    assert dataset_infos["default"].dataset_size == 42


@pytest.mark.parametrize(
    "dataset_info",
    [
        DatasetInfo(),
        DatasetInfo(
            description="foo",
            features=Features({"a": Value("int32")}),
            builder_name="builder",
            config_name="config",
            version="1.0.0",
            splits=[{"name": "train"}],
            download_size=42,
        ),
    ],
)
def test_dataset_info_dump_and_reload(tmp_path, dataset_info: DatasetInfo):
    tmp_path = str(tmp_path)
    dataset_info.write_to_directory(tmp_path)
    reloaded = DatasetInfo.from_directory(tmp_path)
    assert dataset_info == reloaded
    assert os.path.exists(os.path.join(tmp_path, "dataset_info.json"))


def test_dataset_info_to_yaml_dict():
    dataset_info = DatasetInfo(
        description="foo",
        citation="bar",
        homepage="https://foo.bar",
        license="CC0",
        features=Features({"a": Value("int32")}),
        post_processed={},
        supervised_keys=(),
        task_templates=[],
        builder_name="builder",
        config_name="config",
        version="1.0.0",
        splits=[{"name": "train", "num_examples": 42}],
        download_checksums={},
        download_size=1337,
        post_processing_size=442,
        dataset_size=1234,
        size_in_bytes=1337 + 442 + 1234,
    )
    dataset_info_yaml_dict = dataset_info._to_yaml_dict()
    assert sorted(dataset_info_yaml_dict) == sorted(DatasetInfo._INCLUDED_INFO_IN_YAML)
    for key in DatasetInfo._INCLUDED_INFO_IN_YAML:
        assert key in dataset_info_yaml_dict
        assert isinstance(dataset_info_yaml_dict[key], (list, dict, int, str))
    dataset_info_yaml = yaml.safe_dump(dataset_info_yaml_dict)
    reloaded = yaml.safe_load(dataset_info_yaml)
    assert dataset_info_yaml_dict == reloaded


def test_dataset_info_to_yaml_dict_empty():
    dataset_info = DatasetInfo()
    dataset_info_yaml_dict = dataset_info._to_yaml_dict()
    assert dataset_info_yaml_dict == {}


@pytest.mark.parametrize(
    "dataset_infos_dict",
    [
        DatasetInfosDict(),
        DatasetInfosDict({"default": DatasetInfo()}),
        DatasetInfosDict({"my_config_name": DatasetInfo()}),
        DatasetInfosDict(
            {
                "default": DatasetInfo(
                    description="foo",
                    features=Features({"a": Value("int32")}),
                    builder_name="builder",
                    config_name="config",
                    version="1.0.0",
                    splits=[{"name": "train"}],
                    download_size=42,
                )
            }
        ),
        DatasetInfosDict(
            {
                "v1": DatasetInfo(dataset_size=42),
                "v2": DatasetInfo(dataset_size=1337),
            }
        ),
    ],
)
def test_dataset_infos_dict_dump_and_reload(tmp_path, dataset_infos_dict: DatasetInfosDict):
    tmp_path = str(tmp_path)
    dataset_infos_dict.write_to_directory(tmp_path)
    reloaded = DatasetInfosDict.from_directory(tmp_path)
    # the config_name of the dataset_infos_dict takes over the attribute
    for config_name, dataset_info in dataset_infos_dict.items():
        dataset_info.config_name = config_name
        # the yaml representation doesn't include fields like description or citation
        # so we just test that we can recover what we can from the yaml
        dataset_infos_dict[config_name] = DatasetInfo._from_yaml_dict(dataset_info._to_yaml_dict())
    assert dataset_infos_dict == reloaded
    if dataset_infos_dict:
        assert os.path.exists(os.path.join(tmp_path, "README.md"))
'''RoBERTa model configuration.'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "roberta-base": "https://huggingface.co/roberta-base/resolve/main/config.json",
    "roberta-large": "https://huggingface.co/roberta-large/resolve/main/config.json",
    "roberta-large-mnli": "https://huggingface.co/roberta-large-mnli/resolve/main/config.json",
    "distilroberta-base": "https://huggingface.co/distilroberta-base/resolve/main/config.json",
    "roberta-base-openai-detector": "https://huggingface.co/roberta-base-openai-detector/resolve/main/config.json",
    "roberta-large-openai-detector": "https://huggingface.co/roberta-large-openai-detector/resolve/main/config.json",
}


class RobertaConfig(PretrainedConfig):
    model_type = "roberta"

    def __init__(
        self,
        vocab_size=50265,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class RobertaOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
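
# Minimal usage sketch (not from the original module): build a default config and
# inspect the dynamic ONNX input axes exposed by the OnnxConfig subclass.
config = RobertaConfig()
onnx_config = RobertaOnnxConfig(config)
print(onnx_config.inputs)  # OrderedDict([("input_ids", {0: "batch", 1: "sequence"}), ...])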
'''Reverse every word longer than four characters in a sentence.'''


def reverse_long_words(sentence: str) -> str:
    return " ".join(
        "".join(word[::-1]) if len(word) > 4 else word for word in sentence.split()
    )
if __name__ == "__main__":
import doctest
doctest.testmod()
print(reverse_long_words('''Hey wollef sroirraw'''))
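
# For example, reverse_long_words("Hey wollef sroirraw") returns "Hey fellow warriors":
# "Hey" (3 letters) is kept as-is, while the 6- and 8-letter words are reversed.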
import argparse
import pickle
import numpy as np
import torch
from torch import nn
from transformers import ReformerConfig, ReformerModelWithLMHead
from transformers.utils import logging
logging.set_verbosity_info()
def set_param(torch_layer, weight, bias=None):
    # set parameter of one layer
    assert torch_layer.weight.shape == weight.shape, f"{torch_layer} layer.weight does not match"
    torch_layer.weight = nn.Parameter(weight)
    if bias is not None:
        assert torch_layer.bias.shape == bias.shape, f"{torch_layer} layer.bias does not match"
        torch_layer.bias = nn.Parameter(bias)


def set_layer_weights_in_torch_lsh(weights, torch_layer, hidden_size):
    # set torch weights for 1-to-1 comparison
    np_query_key = np.asarray(weights[0])
    np_value = np.asarray(weights[1])
    np_dense = np.asarray(weights[2])

    set_param(
        torch_layer.self_attention.query_key,
        torch.tensor(np_query_key).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.self_attention.value,
        torch.tensor(np_value).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.output.dense,
        torch.tensor(np_dense).view(-1, hidden_size).contiguous().transpose(0, 1),
    )


def set_layer_weights_in_torch_local(weights, torch_layer, hidden_size):
    # set torch weights for 1-to-1 comparison
    np_query = np.asarray(weights[0])
    np_key = np.asarray(weights[1])
    np_value = np.asarray(weights[2])
    np_dense = np.asarray(weights[3])

    set_param(
        torch_layer.self_attention.query,
        torch.tensor(np_query).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.self_attention.key,
        torch.tensor(np_key).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.self_attention.value,
        torch.tensor(np_value).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.output.dense,
        torch.tensor(np_dense).view(-1, hidden_size).contiguous().transpose(0, 1),
    )


def set_block_weights_in_torch(weights, torch_block, hidden_size):
    # layernorm 1
    layer_norm_1 = weights[0][0][0]
    layer_norm_1_weight = np.asarray(layer_norm_1[0])
    layer_norm_1_bias = np.asarray(layer_norm_1[1])
    set_param(
        torch_block.attention.layer_norm,
        torch.tensor(layer_norm_1_weight),
        torch.tensor(layer_norm_1_bias),
    )

    # lsh weights + output
    attn_weights = weights[0][1]
    if len(attn_weights) < 4:
        set_layer_weights_in_torch_lsh(attn_weights, torch_block.attention, hidden_size)
    else:
        set_layer_weights_in_torch_local(attn_weights, torch_block.attention, hidden_size)

    # intermediate weights
    intermediate_weights = weights[2][0][1][2]

    # Chunked Feed Forward
    if len(intermediate_weights) == 4:
        intermediate_weights = intermediate_weights[2]

    # layernorm 2
    layer_norm_2_weight = np.asarray(intermediate_weights[0][0])
    layer_norm_2_bias = np.asarray(intermediate_weights[0][1])
    set_param(
        torch_block.feed_forward.layer_norm,
        torch.tensor(layer_norm_2_weight),
        torch.tensor(layer_norm_2_bias),
    )

    # intermediate dense
    inter_dense_weight = np.asarray(intermediate_weights[1][0])
    inter_dense_bias = np.asarray(intermediate_weights[1][1])
    set_param(
        torch_block.feed_forward.dense.dense,
        torch.tensor(inter_dense_weight).transpose(0, 1).contiguous(),
        torch.tensor(inter_dense_bias),
    )

    # intermediate out
    out_dense_weight = np.asarray(intermediate_weights[4][0])
    out_dense_bias = np.asarray(intermediate_weights[4][1])
    set_param(
        torch_block.feed_forward.output.dense,
        torch.tensor(out_dense_weight).transpose(0, 1).contiguous(),
        torch.tensor(out_dense_bias),
    )


def set_model_weights_in_torch(weights, torch_model, hidden_size):
    # reformer model
    torch_model_reformer = torch_model.reformer

    # word embeds
    word_embeddings = np.asarray(weights[1])
    set_param(
        torch_model_reformer.embeddings.word_embeddings,
        torch.tensor(word_embeddings),
    )

    if isinstance(weights[3], tuple):
        position_embeddings = torch_model_reformer.embeddings.position_embeddings
        for emb_idx in range(len(position_embeddings.weights)):
            emb_weights = np.asarray(weights[3][emb_idx][0])
            assert (
                position_embeddings.weights[emb_idx].shape == emb_weights.shape
            ), f"{position_embeddings[emb_idx]} emb does not match"
            position_embeddings.weights[emb_idx] = nn.Parameter(torch.tensor(emb_weights))

    trax_layer_weights = weights[5]
    assert len(torch_model_reformer.encoder.layers) * 4 == len(
        trax_layer_weights
    ), "HF and trax model do not have the same number of layers"
    for layer_idx, layer in enumerate(torch_model_reformer.encoder.layers):
        block_weights = trax_layer_weights[4 * layer_idx : 4 * (layer_idx + 1)]
        set_block_weights_in_torch(block_weights, layer, hidden_size)

    # output layer norm
    layer_norm_out_weight = np.asarray(weights[7][0])
    layer_norm_out_bias = np.asarray(weights[7][1])
    set_param(
        torch_model_reformer.encoder.layer_norm,
        torch.tensor(layer_norm_out_weight),
        torch.tensor(layer_norm_out_bias),
    )

    # output embeddings
    output_embed_weights = np.asarray(weights[9][0])
    output_embed_bias = np.asarray(weights[9][1])
    set_param(
        torch_model.lm_head.decoder,
        torch.tensor(output_embed_weights).transpose(0, 1).contiguous(),
        torch.tensor(output_embed_bias),
    )


def convert_trax_checkpoint_to_pytorch(trax_model_pkl_path, config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = ReformerConfig.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = ReformerModelWithLMHead(config)

    with open(trax_model_pkl_path, "rb") as f:
        model_weights = pickle.load(f)["weights"]

    set_model_weights_in_torch(model_weights, model, config.hidden_size)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
UpperCAmelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--trax_model_pkl_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help=(
'''The config json file corresponding to the pre-trained Reformer model. \n'''
'''This specifies the model architecture.'''
),
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
UpperCAmelCase__ = parser.parse_args()
convert_trax_checkpoint_to_pytorch(args.trax_model_pkl_path, args.config_file, args.pytorch_dump_path)
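
# Example invocation (a sketch; the script filename and paths are hypothetical):
#
#   python convert_reformer_trax_checkpoint_to_pytorch.py \
#       --trax_model_pkl_path ./reformer_trax.pkl \
#       --config_file ./config.json \
#       --pytorch_dump_path ./pytorch_model.bin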
'''Build the first n terms of the harmonic series as strings.'''


def harmonic_series(n_term: str) -> list:
    if n_term == "":
        return []
    series: list = []
    for temp in range(int(n_term)):
        series.append(f"1/{temp + 1}" if series else "1")
    return series
if __name__ == "__main__":
__SCREAMING_SNAKE_CASE :str = input('''Enter the last number (nth term) of the Harmonic Series''')
print('''Formula of Harmonic Series => 1+1/2+1/3 ..... 1/n''')
print(harmonic_series(nth_term))
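
# Example: harmonic_series("5") returns ["1", "1/2", "1/3", "1/4", "1/5"], and
# harmonic_series("") returns [] (the argument is the raw string from input()).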
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarely `from diffusers.pipelines import DiffusionPipeline` works
from ...utils import deprecate
from ..controlnet.multicontrolnet import MultiControlNetModel # noqa: F401
from ..controlnet.pipeline_controlnet import StableDiffusionControlNetPipeline # noqa: F401
deprecate(
    "stable diffusion controlnet",
    "0.22.0",
    "Importing `StableDiffusionControlNetPipeline` or `MultiControlNetModel` from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet is deprecated. Please import `from diffusers import StableDiffusionControlNetPipeline` instead.",
    standard_warn=False,
    stacklevel=3,
)
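
# The replacement import recommended by the deprecation message above:
from diffusers import StableDiffusionControlNetPipeline  # noqa: F401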
'''Tests for the Pegasus tokenizers.'''
import unittest
from transformers import PegasusTokenizer, PegasusTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece_no_bos.model")
@require_sentencepiece
@require_tokenizers
class PegasusTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = PegasusTokenizer
    rust_tokenizer_class = PegasusTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = PegasusTokenizer(SAMPLE_VOCAB)
        tokenizer.save_pretrained(self.tmpdirname)

    @cached_property
    def _large_tokenizer(self):
        return PegasusTokenizer.from_pretrained("google/pegasus-large")

    def get_tokenizer(self, **kwargs) -> PegasusTokenizer:
        return PegasusTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        return ("This is a test", "This is a test")

    def test_convert_token_and_id(self):
        token = "</s>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<pad>")
        self.assertEqual(vocab_keys[1], "</s>")
        self.assertEqual(vocab_keys[-1], "v")
        self.assertEqual(len(vocab_keys), 1103)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1103)

    def test_mask_tokens_rust_pegasus(self):
        rust_tokenizer = self.rust_tokenizer_class.from_pretrained(self.tmpdirname)
        py_tokenizer = self.tokenizer_class.from_pretrained(self.tmpdirname)
        raw_input_str = (
            "Let's see which <unk> is the better <unk_token_11> one <mask_1> It seems like this <mask_2> was important"
            " </s> <pad> <pad> <pad>"
        )
        rust_ids = rust_tokenizer([raw_input_str], return_tensors=None, add_special_tokens=False).input_ids[0]
        py_ids = py_tokenizer([raw_input_str], return_tensors=None, add_special_tokens=False).input_ids[0]
        self.assertListEqual(py_ids, rust_ids)

    def test_large_mask_tokens(self):
        tokenizer = self._large_tokenizer
        # <mask_1> masks whole sentence while <mask_2> masks single word
        raw_input_str = "<mask_1> To ensure a <mask_2> flow of bank resolutions."
        desired_result = [2, 413, 615, 114, 3, 1971, 113, 1679, 10710, 107, 1]
        ids = tokenizer([raw_input_str], return_tensors=None).input_ids[0]
        self.assertListEqual(desired_result, ids)

    def test_large_tokenizer_settings(self):
        tokenizer = self._large_tokenizer
        # The tracebacks for the following asserts are **better** without messages or self.assertEqual
        assert tokenizer.vocab_size == 96103
        assert tokenizer.pad_token_id == 0
        assert tokenizer.eos_token_id == 1
        assert tokenizer.offset == 103
        assert tokenizer.unk_token_id == tokenizer.offset + 2 == 105
        assert tokenizer.unk_token == "<unk>"
        assert tokenizer.model_max_length == 1024
        raw_input_str = "To ensure a smooth flow of bank resolutions."
        desired_result = [413, 615, 114, 2291, 1971, 113, 1679, 10710, 107, 1]
        ids = tokenizer([raw_input_str], return_tensors=None).input_ids[0]
        self.assertListEqual(desired_result, ids)
        assert tokenizer.convert_ids_to_tokens([0, 1, 2, 3]) == ["<pad>", "</s>", "<mask_1>", "<mask_2>"]

    @require_torch
    def test_large_seq2seq_truncation(self):
        src_texts = ["This is going to be way too long." * 150, "short example"]
        tgt_texts = ["not super long but more than 5 tokens", "tiny"]
        batch = self._large_tokenizer(src_texts, padding=True, truncation=True, return_tensors="pt")
        targets = self._large_tokenizer(
            text_target=tgt_texts, max_length=5, padding=True, truncation=True, return_tensors="pt"
        )

        assert batch.input_ids.shape == (2, 1024)
        assert batch.attention_mask.shape == (2, 1024)
        assert targets["input_ids"].shape == (2, 5)
        assert len(batch) == 2  # input_ids, attention_mask.

    @slow
    def test_tokenizer_integration(self):
        # fmt: off
_UpperCAmelCase = {"input_ids": [[3_8_9_7_9, 1_4_3, 1_8_4_8_5, 6_0_6, 1_3_0, 2_6_6_6_9, 8_7_6_8_6, 1_2_1, 5_4_1_8_9, 1_1_2_9, 1_1_1, 2_6_6_6_9, 8_7_6_8_6, 1_2_1, 9_1_1_4, 1_4_7_8_7, 1_2_1, 1_3_2_4_9, 1_5_8, 5_9_2, 9_5_6, 1_2_1, 1_4_6_2_1, 3_1_5_7_6, 1_4_3, 6_2_6_1_3, 1_0_8, 9_6_8_8, 9_3_0, 4_3_4_3_0, 1_1_5_6_2, 6_2_6_1_3, 3_0_4, 1_0_8, 1_1_4_4_3, 8_9_7, 1_0_8, 9_3_1_4, 1_7_4_1_5, 6_3_3_9_9, 1_0_8, 1_1_4_4_3, 7_6_1_4, 1_8_3_1_6, 1_1_8, 4_2_8_4, 7_1_4_8, 1_2_4_3_0, 1_4_3, 1_4_0_0, 2_5_7_0_3, 1_5_8, 1_1_1, 4_2_8_4, 7_1_4_8, 1_1_7_7_2, 1_4_3, 2_1_2_9_7, 1_0_6_4, 1_5_8, 1_2_2, 2_0_4, 3_5_0_6, 1_7_5_4, 1_1_3_3, 1_4_7_8_7, 1_5_8_1, 1_1_5, 3_3_2_2_4, 4_4_8_2, 1_1_1, 1_3_5_5, 1_1_0, 2_9_1_7_3, 3_1_7, 5_0_8_3_3, 1_0_8, 2_0_1_4_7, 9_4_6_6_5, 1_1_1, 7_7_1_9_8, 1_0_7, 1], [1_1_0, 6_2_6_1_3, 1_1_7, 6_3_8, 1_1_2, 1_1_3_3, 1_2_1, 2_0_0_9_8, 1_3_5_5, 7_9_0_5_0, 1_3_8_7_2, 1_3_5, 1_5_9_6, 5_3_5_4_1, 1_3_5_2, 1_4_1, 1_3_0_3_9, 5_5_4_2, 1_2_4, 3_0_2, 5_1_8, 1_1_1, 2_6_8, 2_9_5_6, 1_1_5, 1_4_9, 4_4_2_7, 1_0_7, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1_3_9, 1_2_3_5, 2_7_9_9, 1_8_2_8_9, 1_7_7_8_0, 2_0_4, 1_0_9, 9_4_7_4, 1_2_9_6, 1_0_7, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=snake_case_ , model_name="google/bigbird-pegasus-large-arxiv" , revision="ba85d0851d708441f91440d509690f1ab6353415" , )
@require_sentencepiece
@require_tokenizers
class BigBirdPegasusTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = PegasusTokenizer
    rust_tokenizer_class = PegasusTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = PegasusTokenizer(SAMPLE_VOCAB, offset=0, mask_token_sent=None, mask_token="[MASK]")
        tokenizer.save_pretrained(self.tmpdirname)

    @cached_property
    def _large_tokenizer(self):
        return PegasusTokenizer.from_pretrained("google/bigbird-pegasus-large-arxiv")

    def get_tokenizer(self, **kwargs) -> PegasusTokenizer:
        return PegasusTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        return ("This is a test", "This is a test")

    def test_mask_tokens_rust_pegasus(self):
        rust_tokenizer = self.rust_tokenizer_class.from_pretrained(self.tmpdirname)
        py_tokenizer = self.tokenizer_class.from_pretrained(self.tmpdirname)
        raw_input_str = (
            "Let's see which <unk> is the better <unk_token> one [MASK] It seems like this [MASK] was important </s>"
            " <pad> <pad> <pad>"
        )
        rust_ids = rust_tokenizer([raw_input_str], return_tensors=None, add_special_tokens=False).input_ids[0]
        py_ids = py_tokenizer([raw_input_str], return_tensors=None, add_special_tokens=False).input_ids[0]
        self.assertListEqual(py_ids, rust_ids)

    @require_torch
    def test_large_seq2seq_truncation(self):
        src_texts = ["This is going to be way too long." * 1000, "short example"]
        tgt_texts = ["not super long but more than 5 tokens", "tiny"]
        batch = self._large_tokenizer(src_texts, padding=True, truncation=True, return_tensors="pt")
        targets = self._large_tokenizer(
            text_target=tgt_texts, max_length=5, padding=True, truncation=True, return_tensors="pt"
        )

        assert batch.input_ids.shape == (2, 4096)
        assert batch.attention_mask.shape == (2, 4096)
        assert targets["input_ids"].shape == (2, 5)
        assert len(batch) == 2  # input_ids, attention_mask.

    def test_equivalence_to_orig_tokenizer(self):
        test_string = (
            "This is an example string that is used to test the original TF implementation against the HF"
            " implementation"
        )
        token_ids = self._large_tokenizer(test_string).input_ids

        self.assertListEqual(
            token_ids,
            [182, 117, 142, 587, 4211, 120, 117, 263, 112, 804, 109, 856, 25016, 3137, 464, 109, 26955, 3137, 1],
        )
import unittest
from parameterized import parameterized
from transformers import AutoTokenizer, GPTNeoXConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
GPTNeoXForCausalLM,
GPTNeoXForQuestionAnswering,
GPTNeoXForSequenceClassification,
GPTNeoXForTokenClassification,
GPTNeoXModel,
)
class GPTNeoXModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=64,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.pad_token_id = vocab_size - 1

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_labels = None
        if self.use_labels:
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)

        config = self.get_config()

        return config, input_ids, input_mask, token_labels

    def get_config(self):
        return GPTNeoXConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
            pad_token_id=self.pad_token_id,
        )

    def prepare_config_and_inputs_for_decoder(self):
        config, input_ids, input_mask, token_labels = self.prepare_config_and_inputs()
        config.is_decoder = True
        return config, input_ids, input_mask, token_labels
    def create_and_check_model(self, config, input_ids, input_mask):
        model = GPTNeoXModel(config=config)
        model.to(torch_device)
        model.eval()
        _ = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_model_as_decoder(self, config, input_ids, input_mask):
        config.add_cross_attention = True
        model = GPTNeoXModel(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_causal_lm(self, config, input_ids, input_mask, token_labels):
        model = GPTNeoXForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_question_answering(self, config, input_ids, input_mask, token_labels):
        config.num_labels = self.num_labels
        model = GPTNeoXForQuestionAnswering(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_for_sequence_classification(self, config, input_ids, input_mask, token_labels):
        config.num_labels = self.num_labels
        model = GPTNeoXForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        result = model(input_ids, attention_mask=input_mask, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(self, config, input_ids, input_mask, token_labels):
        config.num_labels = self.num_labels
        model = GPTNeoXForTokenClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_decoder_model_past_large_inputs(self, config, input_ids, input_mask):
        config.is_decoder = True
        model = GPTNeoXForCausalLM(config=config)
        model.to(torch_device)
        model.eval()

        # first forward pass
        outputs = model(input_ids, attention_mask=input_mask, use_cache=True)
        past_key_values = outputs.past_key_values

        # create hypothetical multiple next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_mask = ids_tensor((self.batch_size, 3), vocab_size=2)

        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([input_mask, next_mask], dim=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask, output_hidden_states=True)
        output_from_no_past = output_from_no_past["hidden_states"][0]
        output_from_past = model(
            next_tokens,
            attention_mask=next_attention_mask,
            past_key_values=past_key_values,
            output_hidden_states=True,
        )["hidden_states"][0]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()

        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))

    def prepare_config_and_inputs_for_common(self):
        config, input_ids, input_mask, token_labels = self.prepare_config_and_inputs()
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class A ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , unittest.TestCase ):
"""simple docstring"""
lowerCamelCase = (
(
GPTNeoXModel,
GPTNeoXForCausalLM,
GPTNeoXForQuestionAnswering,
GPTNeoXForSequenceClassification,
GPTNeoXForTokenClassification,
)
if is_torch_available()
else ()
)
lowerCamelCase = (GPTNeoXForCausalLM,) if is_torch_available() else ()
lowerCamelCase = (
{
'feature-extraction': GPTNeoXModel,
'question-answering': GPTNeoXForQuestionAnswering,
'text-classification': GPTNeoXForSequenceClassification,
'text-generation': GPTNeoXForCausalLM,
'token-classification': GPTNeoXForTokenClassification,
'zero-shot': GPTNeoXForSequenceClassification,
}
if is_torch_available()
else {}
)
lowerCamelCase = False
lowerCamelCase = False
lowerCamelCase = False
lowerCamelCase = False
def snake_case__ ( self : str )-> Tuple:
'''simple docstring'''
A__ = GPTNeoXModelTester(self )
A__ = ConfigTester(self,config_class=lowercase_,hidden_size=6_4,num_attention_heads=8 )
def snake_case__ ( self : Optional[Any] )-> Union[str, Any]:
'''simple docstring'''
self.config_tester.run_common_tests()
def snake_case__ ( self : Union[str, Any] )-> Union[str, Any]:
'''simple docstring'''
A__ , A__ , A__ , A__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(lowercase_,lowercase_,lowercase_ )
def snake_case__ ( self : Dict )-> List[Any]:
'''simple docstring'''
A__ , A__ , A__ , A__ = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_model_as_decoder(lowercase_,lowercase_,lowercase_ )
def snake_case__ ( self : List[str] )-> Any:
'''simple docstring'''
A__ , A__ , A__ , A__ = self.model_tester.prepare_config_and_inputs_for_decoder()
A__ = None
self.model_tester.create_and_check_model_as_decoder(lowercase_,lowercase_,lowercase_ )
def snake_case__ ( self : Optional[Any] )-> str:
'''simple docstring'''
A__ , A__ , A__ , A__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_decoder_model_past_large_inputs(lowercase_,lowercase_,lowercase_ )
    def test_model_for_causal_lm( self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_causal_lm(*config_and_inputs )
    def test_model_for_question_answering( self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs )
    def test_model_for_sequence_classification( self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs )
    def test_model_for_token_classification( self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs )
@unittest.skip(reason='Feed forward chunking is not implemented' )
    def test_feed_forward_chunking( self ):
'''simple docstring'''
pass
@parameterized.expand([('linear',), ('dynamic',)] )
    def test_model_rope_scaling( self , scaling_type ):
        '''simple docstring'''
        config , _ = self.model_tester.prepare_config_and_inputs_for_common()
        short_input = ids_tensor([1, 1_0] , config.vocab_size )
        long_input = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] , config.vocab_size )
        set_seed(4_2 ) # Fixed seed at init time so the two models get the same random weights
        original_model = GPTNeoXModel(config )
        original_model.to(torch_device )
        original_model.eval()
        original_short_output = original_model(short_input ).last_hidden_state
        original_long_output = original_model(long_input ).last_hidden_state
        set_seed(4_2 ) # Fixed seed at init time so the two models get the same random weights
        config.rope_scaling = {'type': scaling_type, 'factor': 10.0}
        scaled_model = GPTNeoXModel(config )
        scaled_model.to(torch_device )
        scaled_model.eval()
        scaled_short_output = scaled_model(short_input ).last_hidden_state
        scaled_long_output = scaled_model(long_input ).last_hidden_state
        # Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
        # maximum sequence length, so the outputs for the short input should match.
        if scaling_type == "dynamic":
            self.assertTrue(torch.allclose(original_short_output , scaled_short_output , atol=1E-5 ) )
        else:
            self.assertFalse(torch.allclose(original_short_output , scaled_short_output , atol=1E-5 ) )
        # The output should be different for long inputs
        self.assertFalse(torch.allclose(original_long_output , scaled_long_output , atol=1E-5 ) )
@require_torch
class GPTNeoXLanguageGenerationTest ( unittest.TestCase ):
"""simple docstring"""
@slow
    def test_lm_generate_gptneox( self ):
        '''simple docstring'''
        tokenizer = AutoTokenizer.from_pretrained('EleutherAI/pythia-410m-deduped' )
        for checkpointing in [True, False]:
            model = GPTNeoXForCausalLM.from_pretrained('EleutherAI/pythia-410m-deduped' )
            if checkpointing:
                model.gradient_checkpointing_enable()
            else:
                model.gradient_checkpointing_disable()
            model.to(torch_device )
            inputs = tokenizer('My favorite food is' , return_tensors='pt' ).to(torch_device )
            # The hub repo. is updated on 2023-04-04, resulting in poor outputs.
            # See: https://github.com/huggingface/transformers/pull/24193
            expected_output = 'My favorite food is a good old-fashioned, old-fashioned, old-fashioned.\n\nI\'m not sure'
            output_ids = model.generate(**inputs , do_sample=False , max_new_tokens=2_0 )
            output_str = tokenizer.batch_decode(output_ids )[0]
            self.assertEqual(output_str , expected_output )
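# --- Illustrative addendum (not part of the original test file) -------------
# The cache-consistency property exercised above can also be checked
# standalone with a tiny randomly initialised model; all sizes below are
# arbitrary assumptions chosen only to keep the sketch fast.
if __name__ == "__main__":
    import torch
    from transformers import GPTNeoXConfig, GPTNeoXForCausalLM
    cfg = GPTNeoXConfig(vocab_size=128 , hidden_size=32 , num_hidden_layers=2 , num_attention_heads=4 , intermediate_size=37 )
    lm = GPTNeoXForCausalLM(cfg ).eval()
    ids = torch.randint(0 , 128 , (1, 8) )
    with torch.no_grad():
        full_logits = lm(ids ).logits[:, -1]
        cached = lm(ids[:, :-1] , use_cache=True )
        step_logits = lm(ids[:, -1:] , past_key_values=cached.past_key_values ).logits[:, -1]
    print(torch.allclose(full_logits , step_logits , atol=1e-4 ) ) # expected: True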
| 7 |
'''simple docstring'''
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, BertTokenizer, BlipImageProcessor, BlipProcessor, PreTrainedTokenizerFast
@require_vision
class A_ ( unittest.TestCase ):
    def setUp( self ):
        self.tmpdirname = tempfile.mkdtemp()
        image_processor = BlipImageProcessor()
        tokenizer = BertTokenizer.from_pretrained("hf-internal-testing/tiny-random-BertModel" )
        processor = BlipProcessor(image_processor , tokenizer )
        processor.save_pretrained(self.tmpdirname )
    def get_tokenizer( self , **kwargs ):
        return AutoProcessor.from_pretrained(self.tmpdirname , **kwargs ).tokenizer
    def get_image_processor( self , **kwargs ):
        return AutoProcessor.from_pretrained(self.tmpdirname , **kwargs ).image_processor
    def tearDown( self ):
        shutil.rmtree(self.tmpdirname )
    def prepare_image_inputs( self ):
        image_inputs = [np.random.randint(2_5_5 , size=(3, 3_0, 4_0_0) , dtype=np.uint8 )]
        image_inputs = [Image.fromarray(np.moveaxis(x , 0 , -1 ) ) for x in image_inputs]
        return image_inputs
    def test_save_load_pretrained_additional_features( self ):
        processor = BlipProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
        processor.save_pretrained(self.tmpdirname )
        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)" , eos_token="(EOS)" )
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False , padding_value=1.0 )
        processor = BlipProcessor.from_pretrained(
            self.tmpdirname , bos_token="(BOS)" , eos_token="(EOS)" , do_normalize=False , padding_value=1.0 )
        self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
        self.assertIsInstance(processor.tokenizer , PreTrainedTokenizerFast )
        self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
        self.assertIsInstance(processor.image_processor , BlipImageProcessor )
    def test_image_processor( self ):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = BlipProcessor(tokenizer=tokenizer , image_processor=image_processor )
        image_input = self.prepare_image_inputs()
        input_feat_extract = image_processor(image_input , return_tensors="np" )
        input_processor = processor(images=image_input , return_tensors="np" )
        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
    def test_tokenizer( self ):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = BlipProcessor(tokenizer=tokenizer , image_processor=image_processor )
        input_str = "lower newer"
        encoded_processor = processor(text=input_str )
        encoded_tok = tokenizer(input_str , return_token_type_ids=False )
        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
    def test_processor( self ):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = BlipProcessor(tokenizer=tokenizer , image_processor=image_processor )
        input_str = "lower newer"
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str , images=image_input )
        self.assertListEqual(list(inputs.keys() ) , ["pixel_values", "input_ids", "attention_mask"] )
        # test if it raises when no input is passed
        with pytest.raises(ValueError ):
            processor()
    def test_tokenizer_decode( self ):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = BlipProcessor(tokenizer=tokenizer , image_processor=image_processor )
        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
        decoded_processor = processor.batch_decode(predicted_ids )
        decoded_tok = tokenizer.batch_decode(predicted_ids )
        self.assertListEqual(decoded_tok , decoded_processor )
    def test_model_input_names( self ):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = BlipProcessor(tokenizer=tokenizer , image_processor=image_processor )
        input_str = "lower newer"
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str , images=image_input )
        # For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask']
        self.assertListEqual(list(inputs.keys() ) , ["pixel_values", "input_ids", "attention_mask"] )
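# --- Illustrative addendum (not part of the original test file) -------------
# Minimal end-to-end use of the processor under test; assumes Hub access for
# the tiny random tokenizer checkpoint used throughout this file.
if __name__ == "__main__":
    proc = BlipProcessor(BlipImageProcessor() , BertTokenizer.from_pretrained("hf-internal-testing/tiny-random-BertModel" ) )
    img = Image.fromarray(np.random.randint(255 , size=(30, 400, 3) , dtype=np.uint8 ) )
    batch = proc(text="lower newer" , images=img )
    print(sorted(batch.keys() ) ) # ['attention_mask', 'input_ids', 'pixel_values']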
| 22 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'''configuration_blenderbot_small''': [
'''BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''BlenderbotSmallConfig''',
'''BlenderbotSmallOnnxConfig''',
],
'''tokenization_blenderbot_small''': ['''BlenderbotSmallTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''tokenization_blenderbot_small_fast'''] = ['''BlenderbotSmallTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_blenderbot_small'''] = [
'''BLENDERBOT_SMALL_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''BlenderbotSmallForCausalLM''',
'''BlenderbotSmallForConditionalGeneration''',
'''BlenderbotSmallModel''',
'''BlenderbotSmallPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_tf_blenderbot_small'''] = [
'''TFBlenderbotSmallForConditionalGeneration''',
'''TFBlenderbotSmallModel''',
'''TFBlenderbotSmallPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_flax_blenderbot_small'''] = [
'''FlaxBlenderbotSmallForConditionalGeneration''',
'''FlaxBlenderbotSmallModel''',
'''FlaxBlenderbotSmallPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_blenderbot_small import (
BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP,
BlenderbotSmallConfig,
BlenderbotSmallOnnxConfig,
)
from .tokenization_blenderbot_small import BlenderbotSmallTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_blenderbot_small_fast import BlenderbotSmallTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blenderbot_small import (
BLENDERBOT_SMALL_PRETRAINED_MODEL_ARCHIVE_LIST,
BlenderbotSmallForCausalLM,
BlenderbotSmallForConditionalGeneration,
BlenderbotSmallModel,
BlenderbotSmallPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blenderbot_small import (
TFBlenderbotSmallForConditionalGeneration,
TFBlenderbotSmallModel,
TFBlenderbotSmallPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_blenderbot_small import (
FlaxBlenderbotSmallForConditionalGeneration,
FlaxBlenderbotSmallModel,
FlaxBlenderbotSmallPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 8 |
'''simple docstring'''
import inspect
from typing import List, Optional, Tuple, Union
import numpy as np
import PIL
import torch
import torch.utils.checkpoint
from ...models import UNetaDModel, VQModel
from ...schedulers import (
DDIMScheduler,
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
)
from ...utils import PIL_INTERPOLATION, randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
def preprocess( image : PIL.Image.Image ) -> torch.Tensor:
    '''simple docstring'''
    w , h = image.size
    w , h = (x - x % 32 for x in (w, h)) # resize to integer multiple of 32
    image = image.resize((w, h) , resample=PIL_INTERPOLATION["lanczos"] )
    image = np.array(image ).astype(np.float32 ) / 255.0
    image = image[None].transpose(0 , 3 , 1 , 2 )
    image = torch.from_numpy(image )
    return 2.0 * image - 1.0
class A_ ( DiffusionPipeline ):
    def __init__( self , vqvae : VQModel , unet : UNetaDModel , scheduler : Union[
        DDIMScheduler,
        PNDMScheduler,
        LMSDiscreteScheduler,
        EulerDiscreteScheduler,
        EulerAncestralDiscreteScheduler,
        DPMSolverMultistepScheduler,
    ] , ):
        super().__init__()
        self.register_modules(vqvae=vqvae , unet=unet , scheduler=scheduler )
@torch.no_grad()
    def __call__( self , image : Union[torch.Tensor, PIL.Image.Image] = None , batch_size : Optional[int] = 1 , num_inference_steps : Optional[int] = 1_0_0 , eta : Optional[float] = 0.0 , generator : Optional[Union[torch.Generator, List[torch.Generator]]] = None , output_type : Optional[str] = "pil" , return_dict : bool = True , ):
        if isinstance(image , PIL.Image.Image ):
            batch_size = 1
        elif isinstance(image , torch.Tensor ):
            batch_size = image.shape[0]
        else:
            raise ValueError(f'`image` has to be of type `PIL.Image.Image` or `torch.Tensor` but is {type(image )}' )
        if isinstance(image , PIL.Image.Image ):
            image = preprocess(image )
        height , width = image.shape[-2:]
        # in_channels should be 6: 3 for latents, 3 for low resolution image
        latents_shape = (batch_size, self.unet.config.in_channels // 2, height, width)
        latents_dtype = next(self.unet.parameters() ).dtype
        latents = randn_tensor(latents_shape , generator=generator , device=self.device , dtype=latents_dtype )
        image = image.to(device=self.device , dtype=latents_dtype )
        # set timesteps and move to the correct device
        self.scheduler.set_timesteps(num_inference_steps , device=self.device )
        timesteps_tensor = self.scheduler.timesteps
        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma
        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature.
        # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
        # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
        # and should be between [0, 1]
        accepts_eta = "eta" in set(inspect.signature(self.scheduler.step ).parameters.keys() )
        extra_kwargs = {}
        if accepts_eta:
            extra_kwargs["eta"] = eta
        for t in self.progress_bar(timesteps_tensor ):
            # concat latents and low resolution image in the channel dimension.
            latents_input = torch.cat([latents, image] , dim=1 )
            latents_input = self.scheduler.scale_model_input(latents_input , t )
            # predict the noise residual
            noise_pred = self.unet(latents_input , t ).sample
            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred , t , latents , **extra_kwargs ).prev_sample
        # decode the image latents with the VQVAE
        image = self.vqvae.decode(latents ).sample
        image = torch.clamp(image , -1.0 , 1.0 )
        image = image / 2 + 0.5
        image = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image )
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=image )
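# --- Illustrative addendum (not part of the original pipeline file) ---------
# Smoke-test sketch with tiny randomly initialised modules; all sizes are
# arbitrary assumptions, not the released LDM super-resolution weights, and
# the public `diffusers` names are used in place of this file's relative
# imports.
if __name__ == "__main__":
    import numpy as np
    import PIL.Image
    from diffusers import DDIMScheduler, UNet2DModel, VQModel
    unet = UNet2DModel(
        sample_size=32 , in_channels=6 , out_channels=3 , layers_per_block=1 ,
        block_out_channels=(32, 64) ,
        down_block_types=("DownBlock2D", "AttnDownBlock2D") ,
        up_block_types=("AttnUpBlock2D", "UpBlock2D") , )
    vqvae = VQModel(in_channels=3 , out_channels=3 , latent_channels=3 , sample_size=32 , block_out_channels=(32,) , layers_per_block=1 )
    pipe = A_(vqvae=vqvae , unet=unet , scheduler=DDIMScheduler() )
    low_res = PIL.Image.fromarray(np.zeros((32, 32, 3) , dtype=np.uint8 ) )
    print(pipe(image=low_res , num_inference_steps=2 ).images[0].size ) # (32, 32)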
| 22 | 0 |
from __future__ import annotations
def rec_insertion_sort( collection , n ):
    # Checks if the entire collection has been sorted
    if len(collection ) <= 1 or n <= 1:
        return
    insert_next(collection , n - 1 )
    rec_insertion_sort(collection , n - 1 )
def insert_next( collection , index ):
    # Checks order between adjacent elements
    if index >= len(collection ) or collection[index - 1] <= collection[index]:
        return
    # Swaps adjacent elements since they are not in ascending order
    collection[index - 1] , collection[index] = (
        collection[index],
        collection[index - 1],
    )
    insert_next(collection , index + 1 )
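# Worked example (added commentary): rec_insertion_sort([3, 1, 2], 3)
#   insert_next(col, 2): col[1] = 1 <= col[2] = 2, nothing to do -> [3, 1, 2]
#   rec_insertion_sort(col, 2) -> insert_next(col, 1): 3 > 1, swap -> [1, 3, 2],
#   which cascades into insert_next(col, 2): 3 > 2, swap -> [1, 2, 3]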
if __name__ == "__main__":
    numbers = input('Enter integers separated by spaces: ')
    number_list: list[int] = [int(num) for num in numbers.split()]
rec_insertion_sort(number_list, len(number_list))
print(number_list)
| 9 |
'''simple docstring'''
import string
from math import log10
def term_frequency( term : str , document : str ) -> int:
    '''simple docstring'''
    document_without_punctuation = document.translate(
        str.maketrans("" , "" , string.punctuation ) ).replace("\n" , "" )
    tokenize_document = document_without_punctuation.split(" " ) # word tokenization
    return len([word for word in tokenize_document if word.lower() == term.lower()] )
def document_frequency( term : str , corpus : str ) -> tuple[int, int]:
    '''simple docstring'''
    corpus_without_punctuation = corpus.lower().translate(
        str.maketrans("" , "" , string.punctuation ) ) # strip all punctuation and replace it with ''
    docs = corpus_without_punctuation.split("\n" )
    term = term.lower()
    return (len([doc for doc in docs if term in doc] ), len(docs ))
def inverse_document_frequency( df : int , n : int , smoothing : bool = False ) -> float:
    '''simple docstring'''
    if smoothing:
        if n == 0:
            raise ValueError("log10(0) is undefined." )
        return round(1 + log10(n / (1 + df) ) , 3 )
    if df == 0:
        raise ZeroDivisionError("df must be > 0" )
    elif n == 0:
        raise ValueError("log10(0) is undefined." )
    return round(log10(n / df ) , 3 )
def tf_idf( tf : int , idf : int ) -> float:
    '''simple docstring'''
    return round(tf * idf , 3 )
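# --- Illustrative addendum (corpus and term below are made up) --------------
if __name__ == "__main__":
    corpus = "this is the first document\nthis document is the second document\nand this is the third one"
    df , n = document_frequency("document" , corpus )
    idf = inverse_document_frequency(df , n )
    tf = term_frequency("document" , "this document is the second document" )
    print(tf , (df, n) , idf , tf_idf(tf , idf ) ) # 2 (2, 3) 0.176 0.352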
| 22 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "MIT/ast-finetuned-audioset-10-10-0.4593": (
        "https://huggingface.co/MIT/ast-finetuned-audioset-10-10-0.4593/resolve/main/config.json"
    ),
}
class _SCREAMING_SNAKE_CASE ( PretrainedConfig ):
    '''simple docstring'''
    model_type = "audio-spectrogram-transformer"
    def __init__(self , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3_072 , hidden_act="gelu" , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , initializer_range=0.02 , layer_norm_eps=1E-1_2 , patch_size=16 , qkv_bias=True , frequency_stride=10 , time_stride=10 , max_length=1_024 , num_mel_bins=128 , **kwargs , ) ->Any:
        '''simple docstring'''
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.patch_size = patch_size
        self.qkv_bias = qkv_bias
        self.frequency_stride = frequency_stride
        self.time_stride = time_stride
        self.max_length = max_length
        self.num_mel_bins = num_mel_bins
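# --- Illustrative addendum (not part of the original config file) -----------
# The defaults above describe a 1024-frame, 128-mel-bin spectrogram patched
# with 16x16 patches at stride 10 in both frequency and time.
if __name__ == "__main__":
    cfg = _SCREAMING_SNAKE_CASE()
    print(cfg.model_type , cfg.max_length , cfg.num_mel_bins ) # audio-spectrogram-transformer 1024 128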
| 10 |
'''simple docstring'''
from ..utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_scipy_available,
is_torch_available,
is_torchsde_available,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_pt_objects import * # noqa F403
else:
from .scheduling_consistency_models import CMStochasticIterativeScheduler
from .scheduling_ddim import DDIMScheduler
from .scheduling_ddim_inverse import DDIMInverseScheduler
from .scheduling_ddim_parallel import DDIMParallelScheduler
from .scheduling_ddpm import DDPMScheduler
from .scheduling_ddpm_parallel import DDPMParallelScheduler
from .scheduling_deis_multistep import DEISMultistepScheduler
from .scheduling_dpmsolver_multistep import DPMSolverMultistepScheduler
from .scheduling_dpmsolver_multistep_inverse import DPMSolverMultistepInverseScheduler
from .scheduling_dpmsolver_singlestep import DPMSolverSinglestepScheduler
from .scheduling_euler_ancestral_discrete import EulerAncestralDiscreteScheduler
from .scheduling_euler_discrete import EulerDiscreteScheduler
from .scheduling_heun_discrete import HeunDiscreteScheduler
from .scheduling_ipndm import IPNDMScheduler
    from .scheduling_k_dpm_2_ancestral_discrete import KDPM2AncestralDiscreteScheduler
    from .scheduling_k_dpm_2_discrete import KDPM2DiscreteScheduler
from .scheduling_karras_ve import KarrasVeScheduler
from .scheduling_pndm import PNDMScheduler
from .scheduling_repaint import RePaintScheduler
from .scheduling_sde_ve import ScoreSdeVeScheduler
from .scheduling_sde_vp import ScoreSdeVpScheduler
from .scheduling_unclip import UnCLIPScheduler
from .scheduling_unipc_multistep import UniPCMultistepScheduler
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin
from .scheduling_vq_diffusion import VQDiffusionScheduler
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_flax_objects import * # noqa F403
else:
from .scheduling_ddim_flax import FlaxDDIMScheduler
from .scheduling_ddpm_flax import FlaxDDPMScheduler
from .scheduling_dpmsolver_multistep_flax import FlaxDPMSolverMultistepScheduler
from .scheduling_karras_ve_flax import FlaxKarrasVeScheduler
from .scheduling_lms_discrete_flax import FlaxLMSDiscreteScheduler
from .scheduling_pndm_flax import FlaxPNDMScheduler
from .scheduling_sde_ve_flax import FlaxScoreSdeVeScheduler
from .scheduling_utils_flax import (
FlaxKarrasDiffusionSchedulers,
FlaxSchedulerMixin,
FlaxSchedulerOutput,
broadcast_to_shape_from_left,
)
try:
if not (is_torch_available() and is_scipy_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_torch_and_scipy_objects import * # noqa F403
else:
from .scheduling_lms_discrete import LMSDiscreteScheduler
try:
if not (is_torch_available() and is_torchsde_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_torch_and_torchsde_objects import * # noqa F403
else:
from .scheduling_dpmsolver_sde import DPMSolverSDEScheduler
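# Pattern note (added commentary): every guarded block above has the same
# shape -- probe the optional backend, and on OptionalDependencyNotAvailable
# fall back to dummy objects (`from ..utils.dummy_*_objects import *`) that
# only raise a helpful error when actually used. This keeps importing the
# package working on installs that lack torch, flax, scipy, or torchsde.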
| 22 | 0 |
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
GIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'microsoft/git-base': 'https://huggingface.co/microsoft/git-base/resolve/main/config.json',
}
class GitVisionConfig ( PretrainedConfig):
    '''simple docstring'''
    model_type = "git_vision_model"
    def __init__( self , hidden_size=7_6_8 , intermediate_size=3_0_7_2 , num_hidden_layers=1_2 , num_attention_heads=1_2 , num_channels=3 , image_size=2_2_4 , patch_size=1_6 , hidden_act="quick_gelu" , layer_norm_eps=1e-5 , attention_dropout=0.0 , initializer_range=0.0_2 , **kwargs , ) -> Any:
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act
    @classmethod
    def from_pretrained( cls , pretrained_model_name_or_path , **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)
        config_dict , kwargs = cls.get_config_dict(pretrained_model_name_or_path , **kwargs)
        # get the vision config dict if we are loading from GITConfig
        if config_dict.get("model_type") == "git":
            config_dict = config_dict["vision_config"]
        if "model_type" in config_dict and hasattr(cls , "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                F"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                F"{cls.model_type}. This is not supported for all configurations of models and can yield errors.")
        return cls.from_dict(config_dict , **kwargs)
class GitConfig ( PretrainedConfig):
    '''simple docstring'''
    model_type = "git"
    def __init__( self , vision_config=None , vocab_size=3_0_5_2_2 , hidden_size=7_6_8 , num_hidden_layers=6 , num_attention_heads=1_2 , intermediate_size=3_0_7_2 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=1_0_2_4 , initializer_range=0.0_2 , layer_norm_eps=1e-12 , pad_token_id=0 , position_embedding_type="absolute" , use_cache=True , tie_word_embeddings=False , bos_token_id=1_0_1 , eos_token_id=1_0_2 , num_image_with_embedding=None , **kwargs , ) -> Optional[Any]:
        super().__init__(bos_token_id=bos_token_id , eos_token_id=eos_token_id , pad_token_id=pad_token_id , **kwargs)
        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. initializing the GitVisionConfig with default values.")
        self.vision_config = GitVisionConfig(**vision_config)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.tie_word_embeddings = tie_word_embeddings
        self.num_image_with_embedding = num_image_with_embedding
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
    def to_dict( self) -> Dict:
        output = copy.deepcopy(self.__dict__)
        output['''vision_config'''] = self.vision_config.to_dict()
        output['''model_type'''] = self.__class__.model_type
        return output
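# --- Illustrative addendum (not part of the original config file) -----------
# A default GitConfig builds a default vision config and nests it when
# serialised, which is what `to_dict` above guarantees.
if __name__ == "__main__":
    cfg = GitConfig()
    d = cfg.to_dict()
    print(d["model_type"] , d["vision_config"]["model_type"] ) # git git_vision_model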
| 11 |
'''simple docstring'''
def UpperCAmelCase_ ( number : int ) -> int:
    '''simple docstring'''
    if not isinstance(number , int ) or number < 0:
        raise ValueError("Input must be a non-negative integer" )
_UpperCAmelCase = 0
while number:
# This way we arrive at next set bit (next 1) instead of looping
# through each bit and checking for 1s hence the
# loop won't run 32 times it will only run the number of `1` times
number &= number - 1
count += 1
return count
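# Worked example (added commentary, Brian Kernighan's trick): 12 = 0b1100
#   12 & 11 = 0b1100 & 0b1011 = 0b1000  (clears the lowest set bit)
#    8 &  7 = 0b1000 & 0b0111 = 0b0000  -> two iterations, two set bits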
if __name__ == "__main__":
import doctest
doctest.testmod()
| 22 | 0 |
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
UNetaDConditionModel,
VideoToVideoSDPipeline,
)
from diffusers.utils import floats_tensor, is_xformers_available, skip_mps
from diffusers.utils.testing_utils import enable_full_determinism, slow, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
@skip_mps
class VideoToVideoSDPipelineFastTests( PipelineTesterMixin , unittest.TestCase):
    pipeline_class = VideoToVideoSDPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS.union({'video'}) - {'image', 'width', 'height'}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({'video'}) - {'image'}
    required_optional_params = PipelineTesterMixin.required_optional_params - {'latents'}
    test_attention_slicing = False
    # No `output_type`.
    required_optional_params = frozenset(
        [
            'num_inference_steps',
            'generator',
            'latents',
            'return_dict',
            'callback',
            'callback_steps',
        ])
    def get_dummy_components( self ):
        torch.manual_seed(0 )
        unet = UNetaDConditionModel(
            block_out_channels=(32, 64, 64, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""CrossAttnDownBlock3D""", """CrossAttnDownBlock3D""", """CrossAttnDownBlock3D""", """DownBlock3D""") , up_block_types=("""UpBlock3D""", """CrossAttnUpBlock3D""", """CrossAttnUpBlock3D""", """CrossAttnUpBlock3D""") , cross_attention_dim=32 , attention_head_dim=4 , )
        scheduler = DDIMScheduler(
            beta_start=0.0_0085 , beta_end=0.012 , beta_schedule="""scaled_linear""" , clip_sample=False , set_alpha_to_one=False , )
        torch.manual_seed(0 )
        vae = AutoencoderKL(
            block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , sample_size=1_28 , )
        torch.manual_seed(0 )
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , hidden_act="""gelu""" , projection_dim=5_12 , )
        text_encoder = CLIPTextModel(text_encoder_config )
        tokenizer = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
        components = {
            """unet""": unet,
            """scheduler""": scheduler,
            """vae""": vae,
            """text_encoder""": text_encoder,
            """tokenizer""": tokenizer,
        }
        return components
    def get_dummy_inputs( self , device , seed=0 ):
        # 3 frames
        video = floats_tensor((1, 3, 3, 32, 32) , rng=random.Random(seed ) ).to(device )
        if str(device ).startswith("""mps""" ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        inputs = {
            """prompt""": """A painting of a squirrel eating a burger""",
            """video""": video,
            """generator""": generator,
            """num_inference_steps""": 2,
            """guidance_scale""": 6.0,
            """output_type""": """pt""",
        }
        return inputs
    def test_text_to_video_default_case( self ):
        device = """cpu""" # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = VideoToVideoSDPipeline(**components )
        sd_pipe = sd_pipe.to(device )
        sd_pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs(device )
        inputs["""output_type"""] = """np"""
        frames = sd_pipe(**inputs ).frames
        image_slice = frames[0][-3:, -3:, -1]
        assert frames[0].shape == (32, 32, 3)
        expected_slice = np.array([1_06, 1_17, 1_13, 1_74, 1_37, 1_12, 1_48, 1_51, 1_31] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
    @unittest.skipIf(
        torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , )
    def test_xformers_attention_forwardGenerator_pass( self ):
        self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=False , expected_max_diff=5E-3 )
    @unittest.skip(reason="""Batching needs to be properly figured out first for this pipeline.""" )
    def test_inference_batch_consistent( self ):
        pass
    @unittest.skip(reason="""Batching needs to be properly figured out first for this pipeline.""" )
    def test_inference_batch_single_identical( self ):
        pass
    @unittest.skip(reason="""`num_images_per_prompt` argument is not supported for this pipeline.""" )
    def test_num_images_per_prompt( self ):
        pass
    def test_progress_bar( self ):
        return super().test_progress_bar()
@slow
@skip_mps
class VideoToVideoSDPipelineSlowTests( unittest.TestCase):
    def test_two_step_model( self ):
        pipe = VideoToVideoSDPipeline.from_pretrained("""cerspense/zeroscope_v2_XL""" , torch_dtype=torch.float16 )
        pipe.enable_model_cpu_offload()
        # 10 frames
        generator = torch.Generator(device="""cpu""" ).manual_seed(0 )
        video = torch.randn((1, 10, 3, 10_24, 5_76) , generator=generator )
        video = video.to("""cuda""" )
        prompt = """Spiderman is surfing"""
        video_frames = pipe(prompt , video=video , generator=generator , num_inference_steps=3 , output_type="""pt""" ).frames
        expected_array = np.array([-1.045_8984, -1.127_9297, -0.966_3086, -0.9150_3906, -0.7509_7656] )
        assert np.abs(video_frames.cpu().numpy()[0, 0, 0, 0, -5:] - expected_array ).sum() < 1E-2
| 12 |
'''simple docstring'''
from __future__ import annotations
from collections.abc import Callable
from typing import Any, Generic, TypeVar
T = TypeVar('''T''')
class SegmentTree ( Generic[T] ):
    def __init__( self , arr : list[T] , fnc : Callable[[T, T], T] ):
        any_type = None
        self.N = len(arr )
        self.st = [any_type for _ in range(self.N )] + arr
        self.fn = fnc
        self.build()
    def build( self ):
        for p in range(self.N - 1 , 0 , -1 ):
            self.st[p] = self.fn(self.st[p * 2] , self.st[p * 2 + 1] )
    def update( self , p : int , v : T ):
        p += self.N
        self.st[p] = v
        while p > 1:
            p = p // 2
            self.st[p] = self.fn(self.st[p * 2] , self.st[p * 2 + 1] )
    def query( self , l : int , r : int ): # noqa: E741
        l , r = l + self.N , r + self.N
        res = None
        while l <= r:
            if l % 2 == 1:
                res = self.st[l] if res is None else self.fn(res , self.st[l] )
            if r % 2 == 0:
                res = self.st[r] if res is None else self.fn(res , self.st[r] )
            l , r = (l + 1) // 2 , (r - 1) // 2
        return res
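    # Layout note (added commentary): the tree lives in a flat list of size
    # 2 * N with the root at index 1 and leaf i at index i + N; node p's
    # children sit at 2p and 2p + 1. `query` climbs from both ends, folding in
    # st[l] when l is a right child (l odd) and st[r] when r is a left child
    # (r even).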
if __name__ == "__main__":
from functools import reduce
    test_array = [1, 10, -2, 9, -3, 8, 4, -7, 5, 6, 11, -12]
    test_updates = {
0: 7,
1: 2,
2: 6,
3: -14,
4: 5,
5: 4,
6: 7,
7: -10,
8: 9,
9: 10,
10: 12,
11: 1,
}
    min_segment_tree = SegmentTree(test_array, min)
    max_segment_tree = SegmentTree(test_array, max)
    sum_segment_tree = SegmentTree(test_array, lambda a, b: a + b)
    def test_all_segments() -> None:
        '''simple docstring'''
        for i in range(len(test_array ) ):
            for j in range(i , len(test_array ) ):
                min_range = reduce(min , test_array[i : j + 1] )
                max_range = reduce(max , test_array[i : j + 1] )
                sum_range = reduce(lambda a , b : a + b , test_array[i : j + 1] )
                assert min_range == min_segment_tree.query(i , j )
                assert max_range == max_segment_tree.query(i , j )
                assert sum_range == sum_segment_tree.query(i , j )
test_all_segments()
for index, value in test_updates.items():
        test_array[index] = value
min_segment_tree.update(index, value)
max_segment_tree.update(index, value)
sum_segment_tree.update(index, value)
test_all_segments()
| 22 | 0 |
import argparse
import torch
from transformers import BlenderbotConfig, BlenderbotForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
PATTERNS = [
["""attention""", """attn"""],
["""encoder_attention""", """encoder_attn"""],
["""q_lin""", """q_proj"""],
["""k_lin""", """k_proj"""],
["""v_lin""", """v_proj"""],
["""out_lin""", """out_proj"""],
["""norm_embeddings""", """layernorm_embedding"""],
["""position_embeddings""", """embed_positions"""],
["""embeddings""", """embed_tokens"""],
["""ffn.lin""", """fc"""],
]
def rename_state_dict_key( k ):
    if k == "embeddings.weight":
        return "shared.weight"
    for parlai_name, hf_name in PATTERNS:
        k = k.replace(parlai_name , hf_name )
    if k.startswith("encoder" ):
        k = k.replace(".attn" , ".self_attn" )
        k = k.replace("norm1" , "self_attn_layer_norm" )
        k = k.replace("norm2" , "final_layer_norm" )
    elif k.startswith("decoder" ):
        k = k.replace("norm1" , "self_attn_layer_norm" )
        k = k.replace("norm2" , "encoder_attn_layer_norm" )
        k = k.replace("norm3" , "final_layer_norm" )
    return k
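# Example mapping (added commentary): "encoder.layers.0.attention.q_lin.weight"
# -> "encoder.layers.0.self_attn.q_proj.weight", via the PATTERNS pass plus
# the encoder-specific ".attn" -> ".self_attn" rewrite above.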
def rename_layernorm_keys( sd ):
    keys = [
        "model.encoder.layernorm_embedding.weight",
        "model.encoder.layernorm_embedding.bias",
        "model.decoder.layernorm_embedding.weight",
        "model.decoder.layernorm_embedding.bias",
    ]
    for k in keys:
        v = sd.pop(k )
        new_k = k.replace("layernorm_embedding" , "layer_norm" )
        assert new_k not in sd
        sd[new_k] = v
IGNORE_KEYS = ["""START"""]
@torch.no_grad()
def convert_parlai_checkpoint( checkpoint_path , pytorch_dump_folder_path , config_json_path ):
    model = torch.load(checkpoint_path , map_location="cpu" )
    sd = model["model"]
    cfg = BlenderbotConfig.from_json_file(config_json_path )
    m = BlenderbotForConditionalGeneration(cfg )
    valid_keys = m.model.state_dict().keys()
    failures = []
    mapping = {}
    for k, v in sd.items():
        if k in IGNORE_KEYS:
            continue
        new_k = rename_state_dict_key(k )
        if new_k not in valid_keys:
            failures.append([k, new_k] )
        else:
            mapping[new_k] = v
    if cfg.normalize_before: # Blenderbot-3B checkpoints. Rename layernorm_embedding -> layer_norm
        rename_layernorm_keys(sd )
    m.model.load_state_dict(mapping , strict=True )
    m.half()
    m.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument("""--src_path""", type=str, help="""like blenderbot-model.bin""")
parser.add_argument("""--save_dir""", default="""hf_blenderbot""", type=str, help="""Where to save converted model.""")
parser.add_argument(
"""--hf_config_json""", default="""blenderbot-3b-config.json""", type=str, help="""Path to config to use"""
)
    args = parser.parse_args()
convert_parlai_checkpoint(args.src_path, args.save_dir, args.hf_config_json)
| 13 |
'''simple docstring'''
import pytest
from datasets.utils.sharding import _distribute_shards, _number_of_shards_in_gen_kwargs, _split_gen_kwargs
@pytest.mark.parametrize(
"kwargs, expected" , [
({"num_shards": 0, "max_num_jobs": 1}, []),
({"num_shards": 10, "max_num_jobs": 1}, [range(10 )]),
({"num_shards": 10, "max_num_jobs": 10}, [range(__lowercase , i + 1 ) for i in range(10 )]),
({"num_shards": 1, "max_num_jobs": 10}, [range(1 )]),
({"num_shards": 10, "max_num_jobs": 3}, [range(0 , 4 ), range(4 , 7 ), range(7 , 10 )]),
({"num_shards": 3, "max_num_jobs": 10}, [range(0 , 1 ), range(1 , 2 ), range(2 , 3 )]),
] , )
def test_distribute_shards( kwargs , expected ):
    '''simple docstring'''
    out = _distribute_shards(**kwargs )
    assert out == expected
@pytest.mark.parametrize(
"gen_kwargs, max_num_jobs, expected" , [
({"foo": 0}, 10, [{"foo": 0}]),
({"shards": [0, 1, 2, 3]}, 1, [{"shards": [0, 1, 2, 3]}]),
({"shards": [0, 1, 2, 3]}, 4, [{"shards": [0]}, {"shards": [1]}, {"shards": [2]}, {"shards": [3]}]),
({"shards": [0, 1]}, 4, [{"shards": [0]}, {"shards": [1]}]),
({"shards": [0, 1, 2, 3]}, 2, [{"shards": [0, 1]}, {"shards": [2, 3]}]),
] , )
def test_split_gen_kwargs( gen_kwargs , max_num_jobs , expected ):
    '''simple docstring'''
    out = _split_gen_kwargs(gen_kwargs , max_num_jobs )
    assert out == expected
@pytest.mark.parametrize(
"gen_kwargs, expected" , [
({"foo": 0}, 1),
({"shards": [0]}, 1),
({"shards": [0, 1, 2, 3]}, 4),
({"shards": [0, 1, 2, 3], "foo": 0}, 4),
({"shards": [0, 1, 2, 3], "other": (0, 1)}, 4),
({"shards": [0, 1, 2, 3], "shards2": [0, 1]}, RuntimeError),
] , )
def test_number_of_shards_in_gen_kwargs( gen_kwargs , expected ):
    '''simple docstring'''
    if expected is RuntimeError:
        with pytest.raises(expected ):
            _number_of_shards_in_gen_kwargs(gen_kwargs )
    else:
        out = _number_of_shards_in_gen_kwargs(gen_kwargs )
        assert out == expected
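# --- Illustrative addendum (mirrors the parametrised cases above) -----------
if __name__ == "__main__":
    # 10 shards over at most 3 jobs -> contiguous ranges of sizes 4/3/3.
    print(_distribute_shards(num_shards=10 , max_num_jobs=3 ) ) # [range(0, 4), range(4, 7), range(7, 10)]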
| 22 | 0 |
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
class UpperCamelCase_ ( PretrainedConfig ):
    '''simple docstring'''
    model_type = '''encoder-decoder'''
    is_composition = True
    def __init__( self , **kwargs) ->List[Any]:
        '''simple docstring'''
        super().__init__(**kwargs)
        assert (
            "encoder" in kwargs and "decoder" in kwargs
        ), "Config has to be initialized with encoder and decoder config"
        encoder_config = kwargs.pop('''encoder''')
        encoder_model_type = encoder_config.pop('''model_type''')
        decoder_config = kwargs.pop('''decoder''')
        decoder_model_type = decoder_config.pop('''model_type''')
        from ..auto.configuration_auto import AutoConfig
        self.encoder = AutoConfig.for_model(encoder_model_type , **encoder_config)
        self.decoder = AutoConfig.for_model(decoder_model_type , **decoder_config)
        self.is_encoder_decoder = True
    @classmethod
    def from_encoder_decoder_configs( cls , encoder_config : PretrainedConfig , decoder_config : PretrainedConfig , **kwargs) ->PretrainedConfig:
        '''simple docstring'''
        logger.info('''Set `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config''')
        decoder_config.is_decoder = True
        decoder_config.add_cross_attention = True
        return cls(encoder=encoder_config.to_dict() , decoder=decoder_config.to_dict() , **kwargs)
    def to_dict( self) ->Optional[Any]:
        '''simple docstring'''
        output = copy.deepcopy(self.__dict__)
        output['''encoder'''] = self.encoder.to_dict()
        output['''decoder'''] = self.decoder.to_dict()
        output['''model_type'''] = self.__class__.model_type
        return output
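# --- Illustrative addendum (not part of the original config file) -----------
# Pair two default BERT configs; the decoder flags are set automatically.
if __name__ == "__main__":
    from transformers import BertConfig
    cfg = UpperCamelCase_.from_encoder_decoder_configs(BertConfig() , BertConfig() )
    print(cfg.model_type , cfg.decoder.is_decoder , cfg.decoder.add_cross_attention ) # encoder-decoder True True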
| 14 |
'''simple docstring'''
import math
def perfect_square( num : int ) -> bool:
    '''simple docstring'''
    return math.sqrt(num ) * math.sqrt(num ) == num
def perfect_square_binary_search( n : int ) -> bool:
    '''simple docstring'''
    left = 0
    right = n
    while left <= right:
        mid = (left + right) // 2
        if mid**2 == n:
            return True
        elif mid**2 > n:
            right = mid - 1
        else:
            left = mid + 1
    return False
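# Binary-search trace (added commentary) for n = 16:
#   mid = 8 -> 64 > 16; mid = 3 -> 9 < 16; mid = 5 -> 25 > 16; mid = 4 -> True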
if __name__ == "__main__":
import doctest
doctest.testmod()
| 22 | 0 |
import unittest
from accelerate import debug_launcher
from accelerate.test_utils import require_cpu, test_ops, test_script
@require_cpu
class UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
    def test_cpu( self ):
debug_launcher(test_script.main )
    def test_ops( self ):
debug_launcher(test_ops.main )
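# Note (added commentary): `debug_launcher` spawns a small multi-process
# group on CPU (two processes by default), so these tests exercise the
# distributed code path without requiring GPUs.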
| 15 |
'''simple docstring'''
import inspect
import tempfile
import unittest
from huggingface_hub import hf_hub_download
from transformers import is_torch_available
from transformers.testing_utils import is_flaky, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
TOLERANCE = 1e-4
if is_torch_available():
import torch
from transformers import AutoformerConfig, AutoformerForPrediction, AutoformerModel
from transformers.models.autoformer.modeling_autoformer import AutoformerDecoder, AutoformerEncoder
@require_torch
class AutoformerModelTester :
    def __init__( self , parent , d_model=1_6 , batch_size=1_3 , prediction_length=7 , context_length=1_4 , label_length=1_0 , cardinality=1_9 , embedding_dimension=5 , num_time_features=4 , is_training=True , hidden_size=1_6 , num_hidden_layers=2 , num_attention_heads=4 , intermediate_size=4 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , lags_sequence=[1, 2, 3, 4, 5] , moving_average=2_5 , autocorrelation_factor=5 , ):
        self.d_model = d_model
        self.parent = parent
        self.batch_size = batch_size
        self.prediction_length = prediction_length
        self.context_length = context_length
        self.cardinality = cardinality
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence
        self.embedding_dimension = embedding_dimension
        self.is_training = is_training
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.encoder_seq_length = context_length
        self.decoder_seq_length = prediction_length + label_length
        self.label_length = label_length
        self.moving_average = moving_average
        self.autocorrelation_factor = autocorrelation_factor
    def get_config( self ):
return AutoformerConfig(
d_model=self.d_model , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , prediction_length=self.prediction_length , context_length=self.context_length , label_length=self.label_length , lags_sequence=self.lags_sequence , num_time_features=self.num_time_features , num_static_categorical_features=1 , cardinality=[self.cardinality] , embedding_dimension=[self.embedding_dimension] , moving_average=self.moving_average , )
    def prepare_autoformer_inputs_dict( self , config ):
        _past_length = config.context_length + max(config.lags_sequence )
        static_categorical_features = ids_tensor([self.batch_size, 1] , config.cardinality[0] )
        past_time_features = floats_tensor([self.batch_size, _past_length, config.num_time_features] )
        past_values = floats_tensor([self.batch_size, _past_length] )
        past_observed_mask = floats_tensor([self.batch_size, _past_length] ) > 0.5
        # decoder inputs
        future_time_features = floats_tensor([self.batch_size, config.prediction_length, config.num_time_features] )
        future_values = floats_tensor([self.batch_size, config.prediction_length] )
        inputs_dict = {
            "past_values": past_values,
            "static_categorical_features": static_categorical_features,
            "past_time_features": past_time_features,
            "past_observed_mask": past_observed_mask,
            "future_time_features": future_time_features,
            "future_values": future_values,
        }
        return inputs_dict
    def prepare_config_and_inputs( self ):
        config = self.get_config()
        inputs_dict = self.prepare_autoformer_inputs_dict(config )
        return config, inputs_dict
    def prepare_config_and_inputs_for_common( self ):
        config , inputs_dict = self.prepare_config_and_inputs()
        return config, inputs_dict
    def check_encoder_decoder_model_standalone( self , config , inputs_dict ):
        model = AutoformerModel(config=config ).to(torch_device ).eval()
        outputs = model(**inputs_dict )
        encoder_last_hidden_state = outputs.encoder_last_hidden_state
        last_hidden_state = outputs.last_hidden_state
        with tempfile.TemporaryDirectory() as tmpdirname:
            encoder = model.get_encoder()
            encoder.save_pretrained(tmpdirname )
            encoder = AutoformerEncoder.from_pretrained(tmpdirname ).to(torch_device )
        transformer_inputs , feature , _ , _ , _ = model.create_network_inputs(**inputs_dict )
        seasonal_input , trend_input = model.decomposition_layer(transformer_inputs[:, : config.context_length, ...] )
        enc_input = torch.cat(
            (transformer_inputs[:, : config.context_length, ...], feature[:, : config.context_length, ...]) , dim=-1 , )
        encoder_last_hidden_state_a = encoder(inputs_embeds=enc_input )[0]
        self.parent.assertTrue((encoder_last_hidden_state_a - encoder_last_hidden_state).abs().max().item() < 1e-3 )
        mean = (
            torch.mean(transformer_inputs[:, : config.context_length, ...] , dim=1 )
            .unsqueeze(1 )
            .repeat(1 , config.prediction_length , 1 )
        )
        zeros = torch.zeros(
            [transformer_inputs.shape[0], config.prediction_length, transformer_inputs.shape[2]] , device=enc_input.device , )
        dec_input = torch.cat(
            (
                torch.cat((seasonal_input[:, -config.label_length :, ...], zeros) , dim=1 ),
                feature[:, config.context_length - config.label_length :, ...],
            ) , dim=-1 , )
        trend_init = torch.cat(
            (
                torch.cat((trend_input[:, -config.label_length :, ...], mean) , dim=1 ),
                feature[:, config.context_length - config.label_length :, ...],
            ) , dim=-1 , )
        with tempfile.TemporaryDirectory() as tmpdirname:
            decoder = model.get_decoder()
            decoder.save_pretrained(tmpdirname )
            decoder = AutoformerDecoder.from_pretrained(tmpdirname ).to(torch_device )
        last_hidden_state_a = decoder(
            trend=trend_init , inputs_embeds=dec_input , encoder_hidden_states=encoder_last_hidden_state , )[0]
        self.parent.assertTrue((last_hidden_state_a - last_hidden_state).abs().max().item() < 1e-3 )
@require_torch
class AutoformerModelTest ( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (AutoformerModel, AutoformerForPrediction) if is_torch_available() else ()
    all_generative_model_classes = (AutoformerForPrediction,) if is_torch_available() else ()
    pipeline_model_mapping = {"""feature-extraction""": AutoformerModel} if is_torch_available() else {}
    test_pruning = False
    test_head_masking = False
    test_missing_keys = False
    test_torchscript = False
    test_inputs_embeds = False
    test_model_common_attributes = False
    def setUp( self ):
        self.model_tester = AutoformerModelTester(self )
        self.config_tester = ConfigTester(self , config_class=AutoformerConfig , has_text_modality=False )
    def test_config( self ):
        self.config_tester.run_common_tests()
    def test_save_load_strict( self ):
        config , inputs_dict = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            model = model_class(config )
            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname )
                model_a , info = model_class.from_pretrained(tmpdirname , output_loading_info=True )
            self.assertEqual(info["missing_keys"] , [] )
    def test_encoder_decoder_model_standalone( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_encoder_decoder_model_standalone(*config_and_inputs )
@unittest.skip(reason="Model has no tokens embeddings" )
    def test_resize_tokens_embeddings( self ):
pass
    def test_model_main_input_name( self ):
        model_signature = inspect.signature(getattr(AutoformerModel , "forward" ) )
        # The main input is the name of the argument after `self`
        observed_main_input_name = list(model_signature.parameters.keys() )[1]
        self.assertEqual(AutoformerModel.main_input_name , observed_main_input_name )
    def test_forward_signature( self ):
        config , _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            signature = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = [
"past_values",
"past_time_features",
"past_observed_mask",
"static_categorical_features",
"static_real_features",
"future_values",
"future_time_features",
]
if model.__class__.__name__ in ["AutoformerForPrediction"]:
expected_arg_names.append("future_observed_mask" )
expected_arg_names.extend(
[
"decoder_attention_mask",
"head_mask",
"decoder_head_mask",
"cross_attn_head_mask",
"encoder_outputs",
"past_key_values",
"output_hidden_states",
"output_attentions",
"use_cache",
"return_dict",
] )
            self.assertListEqual(arg_names[: len(expected_arg_names )] , expected_arg_names )
    def test_attention_outputs( self ):
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True
        seq_len = getattr(self.model_tester , "seq_length" , None )
        decoder_seq_length = getattr(self.model_tester , "decoder_seq_length" , seq_len )
        encoder_seq_length = getattr(self.model_tester , "encoder_seq_length" , seq_len )
        d_model = getattr(self.model_tester , "d_model" , None )
        num_attention_heads = getattr(self.model_tester , "num_attention_heads" , None )
        dim = d_model // num_attention_heads
        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = False
            config.return_dict = True
            model = model_class(config )
            model.to(torch_device )
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict , model_class ) )
            attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
            self.assertEqual(len(attentions ) , self.model_tester.num_hidden_layers )
            # check that output_attentions also work using config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config )
            model.to(torch_device )
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict , model_class ) )
            attentions = outputs.encoder_attentions
            self.assertEqual(len(attentions ) , self.model_tester.num_hidden_layers )
            self.assertListEqual(
                list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, dim] , )
            out_len = len(outputs )
            correct_outlen = 7
            if "last_hidden_state" in outputs:
                correct_outlen += 1
            if "trend" in outputs:
                correct_outlen += 1
            if "past_key_values" in outputs:
                correct_outlen += 1 # past_key_values have been returned
            if "loss" in outputs:
                correct_outlen += 1
            if "params" in outputs:
                correct_outlen += 1
            self.assertEqual(out_len , correct_outlen )
            # decoder attentions
            decoder_attentions = outputs.decoder_attentions
            self.assertIsInstance(decoder_attentions , (list, tuple) )
            self.assertEqual(len(decoder_attentions ) , self.model_tester.num_hidden_layers )
            self.assertListEqual(
                list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, decoder_seq_length, dim] , )
            # cross attentions
            cross_attentions = outputs.cross_attentions
            self.assertIsInstance(cross_attentions , (list, tuple) )
            self.assertEqual(len(cross_attentions ) , self.model_tester.num_hidden_layers )
            self.assertListEqual(
                list(cross_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, decoder_seq_length, dim] , )
        # Check attention is always last and order is fine
        inputs_dict["output_attentions"] = True
        inputs_dict["output_hidden_states"] = True
        model = model_class(config )
        model.to(torch_device )
        model.eval()
        with torch.no_grad():
            outputs = model(**self._prepare_for_class(inputs_dict , model_class ) )
        self.assertEqual(out_len + 2 , len(outputs ) )
        self_attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
        self.assertEqual(len(self_attentions ) , self.model_tester.num_hidden_layers )
        self.assertListEqual(
            list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, dim] , )
@is_flaky()
    def test_retain_grad_hidden_states_attentions( self ):
super().test_retain_grad_hidden_states_attentions()
def prepare_batch( filename : str="train-batch.pt" ) -> List[str]:
    '''simple docstring'''
    file = hf_hub_download(repo_id="hf-internal-testing/tourism-monthly-batch" , filename=filename , repo_type="dataset" )
    batch = torch.load(file , map_location=torch_device )
    return batch
@require_torch
@slow
class A_ ( unittest.TestCase ):
def lowercase ( self : Optional[int] ):
_UpperCAmelCase = AutoformerModel.from_pretrained("huggingface/autoformer-tourism-monthly" ).to(snake_case_ )
_UpperCAmelCase = prepare_batch()
with torch.no_grad():
_UpperCAmelCase = model(
past_values=batch["past_values"] , past_time_features=batch["past_time_features"] , past_observed_mask=batch["past_observed_mask"] , static_categorical_features=batch["static_categorical_features"] , future_values=batch["future_values"] , future_time_features=batch["future_time_features"] , )[0]
_UpperCAmelCase = torch.Size(
(6_4, model.config.prediction_length + model.config.label_length, model.config.feature_size) )
self.assertEqual(output.shape , snake_case_ )
_UpperCAmelCase = torch.tensor(
[[0.3_5_9_3, -1.3_3_9_8, 0.6_3_3_0], [0.2_2_7_9, 1.5_3_9_6, -0.1_7_9_2], [0.0_4_5_0, 1.3_2_2_5, -0.2_3_3_5]] , device=snake_case_ )
self.assertTrue(torch.allclose(output[0, :3, :3] , snake_case_ , atol=snake_case_ ) )
def lowercase ( self : Optional[Any] ):
_UpperCAmelCase = AutoformerForPrediction.from_pretrained("huggingface/autoformer-tourism-monthly" ).to(snake_case_ )
_UpperCAmelCase = prepare_batch("val-batch.pt" )
with torch.no_grad():
_UpperCAmelCase = model(
past_values=batch["past_values"] , past_time_features=batch["past_time_features"] , past_observed_mask=batch["past_observed_mask"] , static_categorical_features=batch["static_categorical_features"] , ).encoder_last_hidden_state
_UpperCAmelCase = torch.Size((6_4, model.config.context_length, model.config.d_model) )
self.assertEqual(output.shape , snake_case_ )
_UpperCAmelCase = torch.tensor(
[[-0.0_7_3_4, -0.9_0_3_6, 0.8_3_5_8], [4.7_1_8_6, 2.4_1_1_3, 1.9_5_8_1], [1.7_9_5_3, 2.3_5_5_8, 1.2_9_7_0]] , device=snake_case_ )
self.assertTrue(torch.allclose(output[0, :3, :3] , snake_case_ , atol=snake_case_ ) )
def lowercase ( self : Tuple ):
_UpperCAmelCase = AutoformerForPrediction.from_pretrained("huggingface/autoformer-tourism-monthly" ).to(snake_case_ )
_UpperCAmelCase = prepare_batch("val-batch.pt" )
with torch.no_grad():
_UpperCAmelCase = model.generate(
static_categorical_features=batch["static_categorical_features"] , past_time_features=batch["past_time_features"] , past_values=batch["past_values"] , future_time_features=batch["future_time_features"] , past_observed_mask=batch["past_observed_mask"] , )
_UpperCAmelCase = torch.Size((6_4, model.config.num_parallel_samples, model.config.prediction_length) )
self.assertEqual(outputs.sequences.shape , snake_case_ )
_UpperCAmelCase = torch.tensor([3_1_3_0.6_7_6_3, 4_0_5_6.5_2_9_3, 7_0_5_3.0_7_8_6] , device=snake_case_ )
_UpperCAmelCase = outputs.sequences.mean(dim=1 )
self.assertTrue(torch.allclose(mean_prediction[0, -3:] , snake_case_ , rtol=1e-1 ) )
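# Hedged aside (names assume the `outputs` returned by the generate call above):
# `generate` yields sampled trajectories of shape (batch, num_parallel_samples,
# prediction_length), so besides the mean used in the test, empirical prediction
# intervals can be read off the sample dimension, e.g.
#   p10, p50, p90 = outputs.sequences.quantile(torch.tensor([0.1, 0.5, 0.9]), dim=1)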
| 22 | 0 |
"""simple docstring"""
import argparse
from collections import OrderedDict
from pathlib import Path
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision.transforms import functional as F
from transformers import DetrImageProcessor, TableTransformerConfig, TableTransformerForObjectDetection
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
# here we list all keys to be renamed (original name on the left, our name on the right)
rename_keys = []
for i in range(6):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(F'''transformer.encoder.layers.{i}.self_attn.out_proj.weight''', F'''encoder.layers.{i}.self_attn.out_proj.weight''')
)
rename_keys.append(
(F'''transformer.encoder.layers.{i}.self_attn.out_proj.bias''', F'''encoder.layers.{i}.self_attn.out_proj.bias''')
)
rename_keys.append((F'''transformer.encoder.layers.{i}.linear1.weight''', F'''encoder.layers.{i}.fc1.weight'''))
rename_keys.append((F'''transformer.encoder.layers.{i}.linear1.bias''', F'''encoder.layers.{i}.fc1.bias'''))
rename_keys.append((F'''transformer.encoder.layers.{i}.linear2.weight''', F'''encoder.layers.{i}.fc2.weight'''))
rename_keys.append((F'''transformer.encoder.layers.{i}.linear2.bias''', F'''encoder.layers.{i}.fc2.bias'''))
rename_keys.append(
(F'''transformer.encoder.layers.{i}.norm1.weight''', F'''encoder.layers.{i}.self_attn_layer_norm.weight''')
)
rename_keys.append((F'''transformer.encoder.layers.{i}.norm1.bias''', F'''encoder.layers.{i}.self_attn_layer_norm.bias'''))
rename_keys.append((F'''transformer.encoder.layers.{i}.norm2.weight''', F'''encoder.layers.{i}.final_layer_norm.weight'''))
rename_keys.append((F'''transformer.encoder.layers.{i}.norm2.bias''', F'''encoder.layers.{i}.final_layer_norm.bias'''))
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(F'''transformer.decoder.layers.{i}.self_attn.out_proj.weight''', F'''decoder.layers.{i}.self_attn.out_proj.weight''')
)
rename_keys.append(
(F'''transformer.decoder.layers.{i}.self_attn.out_proj.bias''', F'''decoder.layers.{i}.self_attn.out_proj.bias''')
)
rename_keys.append(
(
F'''transformer.decoder.layers.{i}.multihead_attn.out_proj.weight''',
F'''decoder.layers.{i}.encoder_attn.out_proj.weight''',
)
)
rename_keys.append(
(
F'''transformer.decoder.layers.{i}.multihead_attn.out_proj.bias''',
F'''decoder.layers.{i}.encoder_attn.out_proj.bias''',
)
)
rename_keys.append((F'''transformer.decoder.layers.{i}.linear1.weight''', F'''decoder.layers.{i}.fc1.weight'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.linear1.bias''', F'''decoder.layers.{i}.fc1.bias'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.linear2.weight''', F'''decoder.layers.{i}.fc2.weight'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.linear2.bias''', F'''decoder.layers.{i}.fc2.bias'''))
rename_keys.append(
(F'''transformer.decoder.layers.{i}.norm1.weight''', F'''decoder.layers.{i}.self_attn_layer_norm.weight''')
)
rename_keys.append((F'''transformer.decoder.layers.{i}.norm1.bias''', F'''decoder.layers.{i}.self_attn_layer_norm.bias'''))
rename_keys.append(
(F'''transformer.decoder.layers.{i}.norm2.weight''', F'''decoder.layers.{i}.encoder_attn_layer_norm.weight''')
)
rename_keys.append(
(F'''transformer.decoder.layers.{i}.norm2.bias''', F'''decoder.layers.{i}.encoder_attn_layer_norm.bias''')
)
rename_keys.append((F'''transformer.decoder.layers.{i}.norm3.weight''', F'''decoder.layers.{i}.final_layer_norm.weight'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.norm3.bias''', F'''decoder.layers.{i}.final_layer_norm.bias'''))
# convolutional projection + query embeddings + layernorm of encoder + layernorm of decoder + class and bounding box heads
rename_keys.extend(
[
('input_proj.weight', 'input_projection.weight'),
('input_proj.bias', 'input_projection.bias'),
('query_embed.weight', 'query_position_embeddings.weight'),
('transformer.encoder.norm.weight', 'encoder.layernorm.weight'),
('transformer.encoder.norm.bias', 'encoder.layernorm.bias'),
('transformer.decoder.norm.weight', 'decoder.layernorm.weight'),
('transformer.decoder.norm.bias', 'decoder.layernorm.bias'),
('class_embed.weight', 'class_labels_classifier.weight'),
('class_embed.bias', 'class_labels_classifier.bias'),
('bbox_embed.layers.0.weight', 'bbox_predictor.layers.0.weight'),
('bbox_embed.layers.0.bias', 'bbox_predictor.layers.0.bias'),
('bbox_embed.layers.1.weight', 'bbox_predictor.layers.1.weight'),
('bbox_embed.layers.1.bias', 'bbox_predictor.layers.1.bias'),
('bbox_embed.layers.2.weight', 'bbox_predictor.layers.2.weight'),
('bbox_embed.layers.2.bias', 'bbox_predictor.layers.2.bias'),
]
)
def rename_key(state_dict, old, new):
    val = state_dict.pop(old)
    state_dict[new] = val


def rename_backbone_keys(state_dict):
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if "backbone.0.body" in key:
            new_key = key.replace("backbone.0.body", "backbone.conv_encoder.model")
            new_state_dict[new_key] = value
        else:
            new_state_dict[key] = value
    return new_state_dict


def read_in_q_k_v(state_dict):
    prefix = ""
    # first: transformer encoder
    for i in range(6):
        # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
    # next: transformer decoder (which is a bit more complex because it also includes cross-attention)
    for i in range(6):
        # read in weights + bias of input projection layer of self-attention
        in_proj_weight = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"decoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"decoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"decoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"decoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"decoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"decoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
        # read in weights + bias of input projection layer of cross-attention
        in_proj_weight_cross_attn = state_dict.pop(
            f"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_weight")
        in_proj_bias_cross_attn = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_bias")
        # next, add query, keys and values (in that order) of cross-attention to the state dict
        state_dict[f"decoder.layers.{i}.encoder_attn.q_proj.weight"] = in_proj_weight_cross_attn[:256, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.q_proj.bias"] = in_proj_bias_cross_attn[:256]
        state_dict[f"decoder.layers.{i}.encoder_attn.k_proj.weight"] = in_proj_weight_cross_attn[256:512, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.k_proj.bias"] = in_proj_bias_cross_attn[256:512]
        state_dict[f"decoder.layers.{i}.encoder_attn.v_proj.weight"] = in_proj_weight_cross_attn[-256:, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.v_proj.bias"] = in_proj_bias_cross_attn[-256:]
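# Sketch of the slicing above (assumes hidden size 256, as in this checkpoint):
# PyTorch's MultiheadAttention stores q, k and v stacked in a single (3*256, 256)
# matrix, so rows [:256] hold the query projection, rows [256:512] the key, and the
# last 256 rows the value. For example:
#   in_proj_weight = torch.randn(3 * 256, 256)
#   q_w, k_w, v_w = in_proj_weight[:256], in_proj_weight[256:512], in_proj_weight[-256:]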
def resize(image, checkpoint_url):
    width, height = image.size
    current_max_size = max(width, height)
    target_max_size = 800 if "detection" in checkpoint_url else 1000
    scale = target_max_size / current_max_size
    resized_image = image.resize((int(round(scale * width)), int(round(scale * height))))
    return resized_image


def normalize(image):
    image = F.to_tensor(image)
    image = F.normalize(image, mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
    return image
@torch.no_grad()
def convert_table_transformer_checkpoint(checkpoint_url, pytorch_dump_folder_path, push_to_hub):
    logger.info("Converting model...")

    # load original state dict
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")

    # rename keys
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    state_dict = rename_backbone_keys(state_dict)
    # query, key and value matrices need special treatment
    read_in_q_k_v(state_dict)
    # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
    prefix = "model."
    for key in state_dict.copy().keys():
        if not key.startswith("class_labels_classifier") and not key.startswith("bbox_predictor"):
            val = state_dict.pop(key)
            state_dict[prefix + key] = val

    # create HuggingFace model and load state dict
    config = TableTransformerConfig(
        backbone="resnet18", mask_loss_coefficient=1, dice_loss_coefficient=1, ce_loss_coefficient=1, bbox_loss_coefficient=5, giou_loss_coefficient=2, eos_coefficient=0.4, class_cost=1, bbox_cost=5, giou_cost=2, )

    if "detection" in checkpoint_url:
        config.num_queries = 15
        config.num_labels = 2
        id2label = {0: "table", 1: "table rotated"}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
    else:
        config.num_queries = 125
        config.num_labels = 6
        id2label = {
            0: "table",
            1: "table column",
            2: "table row",
            3: "table column header",
            4: "table projected row header",
            5: "table spanning cell",
        }
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}

    image_processor = DetrImageProcessor(
        format="coco_detection", max_size=800 if "detection" in checkpoint_url else 1000)
    model = TableTransformerForObjectDetection(config)
    model.load_state_dict(state_dict)
    model.eval()

    # verify our conversion
    filename = "example_pdf.png" if "detection" in checkpoint_url else "example_table.png"
    file_path = hf_hub_download(repo_id="nielsr/example-pdf", repo_type="dataset", filename=filename)
    image = Image.open(file_path).convert("RGB")
    pixel_values = normalize(resize(image, checkpoint_url)).unsqueeze(0)
    outputs = model(pixel_values)

    if "detection" in checkpoint_url:
        expected_shape = (1, 15, 3)
        expected_logits = torch.tensor(
            [[-6.7897, -16.9985, 6.7937], [-8.0186, -22.2192, 6.9677], [-7.3117, -21.0708, 7.4055]])
        expected_boxes = torch.tensor([[0.4867, 0.1767, 0.6732], [0.6718, 0.4479, 0.3830], [0.4716, 0.1760, 0.6364]])
    else:
        expected_shape = (1, 125, 7)
        expected_logits = torch.tensor(
            [[-18.1430, -8.3214, 4.8274], [-18.4685, -7.1361, -4.2667], [-26.3693, -9.3429, -4.9962]])
        expected_boxes = torch.tensor([[0.4983, 0.5595, 0.9440], [0.4916, 0.6315, 0.5954], [0.6108, 0.8637, 0.1135]])

    assert outputs.logits.shape == expected_shape
    assert torch.allclose(outputs.logits[0, :3, :3], expected_logits, atol=1e-4)
    assert torch.allclose(outputs.pred_boxes[0, :3, :3], expected_boxes, atol=1e-4)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        # Save model and image processor
        logger.info(f"Saving PyTorch model and image processor to {pytorch_dump_folder_path}...")
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        model.save_pretrained(pytorch_dump_folder_path)
        image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        # Push model to HF hub
        logger.info("Pushing model to the hub...")
        model_name = (
            "microsoft/table-transformer-detection"
            if "detection" in checkpoint_url
            else "microsoft/table-transformer-structure-recognition"
        )
        model.push_to_hub(model_name)
        image_processor.push_to_hub(model_name)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'--checkpoint_url',
default='https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth',
type=str,
choices=[
'https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth',
'https://pubtables1m.blob.core.windows.net/model/pubtables1m_structure_detr_r18.pth',
],
help='URL of the Table Transformer checkpoint you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output PyTorch model.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
    args = parser.parse_args()
convert_table_transformer_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
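# Example invocation (the script filename and output path below are illustrative):
#   python convert_table_transformer_checkpoint.py \
#       --checkpoint_url https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth \
#       --pytorch_dump_folder_path ./table-transformer-detection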
| 16 |
'''simple docstring'''
from .integrations import (
is_optuna_available,
is_ray_available,
is_sigopt_available,
is_wandb_available,
run_hp_search_optuna,
run_hp_search_ray,
run_hp_search_sigopt,
run_hp_search_wandb,
)
from .trainer_utils import (
HPSearchBackend,
default_hp_space_optuna,
default_hp_space_ray,
default_hp_space_sigopt,
default_hp_space_wandb,
)
from .utils import logging
logger = logging.get_logger(__name__)


class HyperParamSearchBackendBase:
    name: str
    pip_package: str = None

    @staticmethod
    def is_available():
        raise NotImplementedError

    def run(self, trainer, n_trials: int, direction: str, **kwargs):
        raise NotImplementedError

    def default_hp_space(self, trial):
        raise NotImplementedError

    def ensure_available(self):
        if not self.is_available():
            raise RuntimeError(
                f"You picked the {self.name} backend, but it is not installed. Run {self.pip_install()}.")

    @classmethod
    def pip_install(cls):
        return f"`pip install {cls.pip_package or cls.name}`"


class OptunaBackend(HyperParamSearchBackendBase):
    name = "optuna"

    @staticmethod
    def is_available():
        return is_optuna_available()

    def run(self, trainer, n_trials: int, direction: str, **kwargs):
        return run_hp_search_optuna(trainer, n_trials, direction, **kwargs)

    def default_hp_space(self, trial):
        return default_hp_space_optuna(trial)


class RayTuneBackend(HyperParamSearchBackendBase):
    name = "ray"
    pip_package = "'ray[tune]'"

    @staticmethod
    def is_available():
        return is_ray_available()

    def run(self, trainer, n_trials: int, direction: str, **kwargs):
        return run_hp_search_ray(trainer, n_trials, direction, **kwargs)

    def default_hp_space(self, trial):
        return default_hp_space_ray(trial)


class SigOptBackend(HyperParamSearchBackendBase):
    name = "sigopt"

    @staticmethod
    def is_available():
        return is_sigopt_available()

    def run(self, trainer, n_trials: int, direction: str, **kwargs):
        return run_hp_search_sigopt(trainer, n_trials, direction, **kwargs)

    def default_hp_space(self, trial):
        return default_hp_space_sigopt(trial)


class WandbBackend(HyperParamSearchBackendBase):
    name = "wandb"

    @staticmethod
    def is_available():
        return is_wandb_available()

    def run(self, trainer, n_trials: int, direction: str, **kwargs):
        return run_hp_search_wandb(trainer, n_trials, direction, **kwargs)

    def default_hp_space(self, trial):
        return default_hp_space_wandb(trial)


ALL_HYPERPARAMETER_SEARCH_BACKENDS = {
    HPSearchBackend(backend.name): backend for backend in [OptunaBackend, RayTuneBackend, SigOptBackend, WandbBackend]
}


def default_hp_search_backend() -> str:
    available_backends = [backend for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values() if backend.is_available()]
    if len(available_backends) > 0:
        name = available_backends[0].name
        if len(available_backends) > 1:
            logger.info(
                f"{len(available_backends)} hyperparameter search backends available. Using {name} as the default.")
        return name

    raise RuntimeError(
        "No hyperparameter search backend available.\n"
        + "\n".join(
            f" - To install {backend.name} run {backend.pip_install()}"
            for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values()))
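# Usage sketch (assumes a configured `transformers.Trainer` named `trainer` and at
# least one backend, e.g. optuna, installed):
#   backend = default_hp_search_backend()
#   best_run = trainer.hyperparameter_search(direction="minimize", backend=backend, n_trials=10)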
| 22 | 0 |
"""simple docstring"""
def __get_demo_graph(index: int) -> dict[int, list[int]]:
    """Return one of four small demo graphs (adjacency lists), selected by index."""
return [
{
0: [1, 2],
1: [0, 2],
2: [0, 1, 3, 5],
3: [2, 4],
4: [3],
5: [2, 6, 8],
6: [5, 7],
7: [6, 8],
8: [5, 7],
},
{
0: [6],
1: [9],
2: [4, 5],
3: [4],
4: [2, 3],
5: [2],
6: [0, 7],
7: [6],
8: [],
9: [1],
},
{
0: [4],
1: [6],
2: [],
3: [5, 6, 7],
4: [0, 6],
5: [3, 8, 9],
6: [1, 3, 4, 7],
7: [3, 6, 8, 9],
8: [5, 7],
9: [5, 7],
},
{
0: [1, 3],
1: [0, 2, 4],
2: [1, 3, 4],
3: [0, 2, 4],
4: [1, 2, 3],
},
][index]
def compute_bridges(graph: dict[int, list[int]]) -> list[tuple[int, int]]:
    """Return the list of undirected graph bridges [(a1, b1), ..., (ak, bk)]; ai <= bi."""
    id_ = 0
    n = len(graph)  # No of vertices in graph
    low = [0] * n
    visited = [False] * n

    def dfs(at, parent, bridges, id_):
        visited[at] = True
        low[at] = id_
        id_ += 1
        for to in graph[at]:
            if to == parent:
                pass
            elif not visited[to]:
                dfs(to, at, bridges, id_)
                low[at] = min(low[at], low[to])
                if id_ <= low[to]:
                    bridges.append((at, to) if at < to else (to, at))
            else:
                # This edge is a back edge and cannot be a bridge
                low[at] = min(low[at], low[to])

    bridges: list[tuple[int, int]] = []
    for i in range(n):
        if not visited[i]:
            dfs(i, -1, bridges, id_)
    return bridges
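# Quick sanity checks against the demo graphs above (hand-derived expectations):
#   compute_bridges(__get_demo_graph(3)) == []   # every edge lies on a cycle
#   compute_bridges(__get_demo_graph(0)) == [(3, 4), (2, 3), (2, 5)]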
if __name__ == "__main__":
import doctest
doctest.testmod()
| 17 |
'''simple docstring'''
__version__ = "0.18.2"
from .configuration_utils import ConfigMixin
from .utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_inflect_available,
is_invisible_watermark_available,
is_k_diffusion_available,
is_k_diffusion_version,
is_librosa_available,
is_note_seq_available,
is_onnx_available,
is_scipy_available,
is_torch_available,
is_torchsde_available,
is_transformers_available,
is_transformers_version,
is_unidecode_available,
logging,
)
try:
if not is_onnx_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_onnx_objects import * # noqa F403
else:
from .pipelines import OnnxRuntimeModel
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_pt_objects import * # noqa F403
else:
    from .models import (
        AutoencoderKL,
        ControlNetModel,
        ModelMixin,
        PriorTransformer,
        T5FilmDecoder,
        Transformer2DModel,
        UNet1DModel,
        UNet2DConditionModel,
        UNet2DModel,
        UNet3DConditionModel,
        VQModel,
    )
from .optimization import (
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
get_scheduler,
)
from .pipelines import (
AudioPipelineOutput,
ConsistencyModelPipeline,
DanceDiffusionPipeline,
DDIMPipeline,
DDPMPipeline,
DiffusionPipeline,
DiTPipeline,
ImagePipelineOutput,
KarrasVePipeline,
LDMPipeline,
LDMSuperResolutionPipeline,
PNDMPipeline,
RePaintPipeline,
ScoreSdeVePipeline,
)
from .schedulers import (
CMStochasticIterativeScheduler,
DDIMInverseScheduler,
DDIMParallelScheduler,
DDIMScheduler,
DDPMParallelScheduler,
DDPMScheduler,
DEISMultistepScheduler,
DPMSolverMultistepInverseScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
HeunDiscreteScheduler,
IPNDMScheduler,
KarrasVeScheduler,
        KDPM2AncestralDiscreteScheduler,
        KDPM2DiscreteScheduler,
PNDMScheduler,
RePaintScheduler,
SchedulerMixin,
ScoreSdeVeScheduler,
UnCLIPScheduler,
UniPCMultistepScheduler,
VQDiffusionScheduler,
)
from .training_utils import EMAModel
try:
if not (is_torch_available() and is_scipy_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_scipy_objects import * # noqa F403
else:
from .schedulers import LMSDiscreteScheduler
try:
if not (is_torch_available() and is_torchsde_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_torchsde_objects import * # noqa F403
else:
from .schedulers import DPMSolverSDEScheduler
try:
if not (is_torch_available() and is_transformers_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipelines import (
        AltDiffusionImg2ImgPipeline,
        AltDiffusionPipeline,
        AudioLDMPipeline,
        CycleDiffusionPipeline,
        IFImg2ImgPipeline,
        IFImg2ImgSuperResolutionPipeline,
        IFInpaintingPipeline,
        IFInpaintingSuperResolutionPipeline,
        IFPipeline,
        IFSuperResolutionPipeline,
        ImageTextPipelineOutput,
        KandinskyImg2ImgPipeline,
        KandinskyInpaintPipeline,
        KandinskyPipeline,
        KandinskyPriorPipeline,
        KandinskyV22ControlnetImg2ImgPipeline,
        KandinskyV22ControlnetPipeline,
        KandinskyV22Img2ImgPipeline,
        KandinskyV22InpaintPipeline,
        KandinskyV22Pipeline,
        KandinskyV22PriorEmb2EmbPipeline,
        KandinskyV22PriorPipeline,
        LDMTextToImagePipeline,
        PaintByExamplePipeline,
        SemanticStableDiffusionPipeline,
        ShapEImg2ImgPipeline,
        ShapEPipeline,
        StableDiffusionAttendAndExcitePipeline,
        StableDiffusionControlNetImg2ImgPipeline,
        StableDiffusionControlNetInpaintPipeline,
        StableDiffusionControlNetPipeline,
        StableDiffusionDepth2ImgPipeline,
        StableDiffusionDiffEditPipeline,
        StableDiffusionImageVariationPipeline,
        StableDiffusionImg2ImgPipeline,
        StableDiffusionInpaintPipeline,
        StableDiffusionInpaintPipelineLegacy,
        StableDiffusionInstructPix2PixPipeline,
        StableDiffusionLatentUpscalePipeline,
        StableDiffusionLDM3DPipeline,
        StableDiffusionModelEditingPipeline,
        StableDiffusionPanoramaPipeline,
        StableDiffusionParadigmsPipeline,
        StableDiffusionPipeline,
        StableDiffusionPipelineSafe,
        StableDiffusionPix2PixZeroPipeline,
        StableDiffusionSAGPipeline,
        StableDiffusionUpscalePipeline,
        StableUnCLIPImg2ImgPipeline,
StableUnCLIPPipeline,
TextToVideoSDPipeline,
TextToVideoZeroPipeline,
UnCLIPImageVariationPipeline,
UnCLIPPipeline,
UniDiffuserModel,
UniDiffuserPipeline,
UniDiffuserTextDecoder,
VersatileDiffusionDualGuidedPipeline,
VersatileDiffusionImageVariationPipeline,
VersatileDiffusionPipeline,
VersatileDiffusionTextToImagePipeline,
VideoToVideoSDPipeline,
VQDiffusionPipeline,
)
try:
if not (is_torch_available() and is_transformers_available() and is_invisible_watermark_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_invisible_watermark_objects import * # noqa F403
else:
    from .pipelines import StableDiffusionXLImg2ImgPipeline, StableDiffusionXLPipeline
try:
if not (is_torch_available() and is_transformers_available() and is_k_diffusion_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_k_diffusion_objects import * # noqa F403
else:
from .pipelines import StableDiffusionKDiffusionPipeline
try:
if not (is_torch_available() and is_transformers_available() and is_onnx_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_onnx_objects import * # noqa F403
else:
from .pipelines import (
        OnnxStableDiffusionImg2ImgPipeline,
OnnxStableDiffusionInpaintPipeline,
OnnxStableDiffusionInpaintPipelineLegacy,
OnnxStableDiffusionPipeline,
OnnxStableDiffusionUpscalePipeline,
StableDiffusionOnnxPipeline,
)
try:
if not (is_torch_available() and is_librosa_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_librosa_objects import * # noqa F403
else:
from .pipelines import AudioDiffusionPipeline, Mel
try:
if not (is_transformers_available() and is_torch_available() and is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_transformers_and_torch_and_note_seq_objects import * # noqa F403
else:
from .pipelines import SpectrogramDiffusionPipeline
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_flax_objects import * # noqa F403
else:
from .models.controlnet_flax import FlaxControlNetModel
from .models.modeling_flax_utils import FlaxModelMixin
    from .models.unet_2d_condition_flax import FlaxUNet2DConditionModel
from .models.vae_flax import FlaxAutoencoderKL
from .pipelines import FlaxDiffusionPipeline
from .schedulers import (
FlaxDDIMScheduler,
FlaxDDPMScheduler,
FlaxDPMSolverMultistepScheduler,
FlaxKarrasVeScheduler,
FlaxLMSDiscreteScheduler,
FlaxPNDMScheduler,
FlaxSchedulerMixin,
FlaxScoreSdeVeScheduler,
)
try:
if not (is_flax_available() and is_transformers_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_flax_and_transformers_objects import * # noqa F403
else:
from .pipelines import (
FlaxStableDiffusionControlNetPipeline,
        FlaxStableDiffusionImg2ImgPipeline,
FlaxStableDiffusionInpaintPipeline,
FlaxStableDiffusionPipeline,
)
try:
if not (is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_note_seq_objects import * # noqa F403
else:
from .pipelines import MidiProcessor
| 22 | 0 |
import mpmath # for roots of unity
import numpy as np
class FFT:
    """Fast polynomial multiplication of two polynomials using the fast Fourier transform."""

    def __init__(self, poly_a=None, poly_b=None):
        # Input as list
        self.polyA = list(poly_a or [0])[:]
        self.polyB = list(poly_b or [0])[:]

        # Remove leading zero coefficients
        while self.polyA[-1] == 0:
            self.polyA.pop()
        self.len_A = len(self.polyA)

        while self.polyB[-1] == 0:
            self.polyB.pop()
        self.len_B = len(self.polyB)

        # Add 0 to make lengths equal a power of 2
        self.c_max_length = int(
            2 ** np.ceil(np.log2(len(self.polyA) + len(self.polyB) - 1)))

        while len(self.polyA) < self.c_max_length:
            self.polyA.append(0)
        while len(self.polyB) < self.c_max_length:
            self.polyB.append(0)

        # A complex root used for the fourier transform
        self.root = complex(mpmath.root(x=1, n=self.c_max_length, k=1))

        # The product
        self.product = self.__multiply()

    # Discrete fourier transform of A and B
    def __dft(self, which):
        dft = [[x] for x in self.polyA] if which == "A" else [[x] for x in self.polyB]
        # Corner case
        if len(dft) <= 1:
            return dft[0]
        #
        next_ncol = self.c_max_length // 2
        while next_ncol > 0:
            new_dft = [[] for i in range(next_ncol)]
            root = self.root**next_ncol

            # First half of next step
            current_root = 1
            for j in range(self.c_max_length // (next_ncol * 2)):
                for i in range(next_ncol):
                    new_dft[i].append(dft[i][j] + current_root * dft[i + next_ncol][j])
                current_root *= root
            # Second half of next step
            current_root = 1
            for j in range(self.c_max_length // (next_ncol * 2)):
                for i in range(next_ncol):
                    new_dft[i].append(dft[i][j] - current_root * dft[i + next_ncol][j])
                current_root *= root
            # Update
            dft = new_dft
            next_ncol = next_ncol // 2
        return dft[0]

    # multiply the DFTs of A and B and find A*B
    def __multiply(self):
        dft_a = self.__dft("A")
        dft_b = self.__dft("B")
        inverce_c = [[dft_a[i] * dft_b[i] for i in range(self.c_max_length)]]
        del dft_a
        del dft_b

        # Corner Case
        if len(inverce_c[0]) <= 1:
            return inverce_c[0]
        # Inverse DFT
        next_ncol = 2
        while next_ncol <= self.c_max_length:
            new_inverse_c = [[] for i in range(next_ncol)]
            root = self.root ** (next_ncol // 2)
            current_root = 1
            # First half of next step
            for j in range(self.c_max_length // next_ncol):
                for i in range(next_ncol // 2):
                    # Even positions
                    new_inverse_c[i].append(
                        (
                            inverce_c[i][j]
                            + inverce_c[i][j + self.c_max_length // next_ncol]
                        )
                        / 2)
                    # Odd positions
                    new_inverse_c[i + next_ncol // 2].append(
                        (
                            inverce_c[i][j]
                            - inverce_c[i][j + self.c_max_length // next_ncol]
                        )
                        / (2 * current_root))
                current_root *= root
            # Update
            inverce_c = new_inverse_c
            next_ncol *= 2
        # Unpack
        inverce_c = [round(x[0].real, 8) + round(x[0].imag, 8) * 1j for x in inverce_c]

        # Remove leading 0's
        while inverce_c[-1] == 0:
            inverce_c.pop()
        return inverce_c

    # Overwrite __str__ for print(); Shows A, B and A*B
    def __str__(self):
        a = "A = " + " + ".join(
            f"{coef}*x^{i}" for i, coef in enumerate(self.polyA[: self.len_A]))
        b = "B = " + " + ".join(
            f"{coef}*x^{i}" for i, coef in enumerate(self.polyB[: self.len_B]))
        c = "A*B = " + " + ".join(
            f"{coef}*x^{i}" for i, coef in enumerate(self.product))
        return f"{a}\n{b}\n{c}"
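# Usage sketch: multiplying (1 + 2x + 3x^2) by (3 + 4x).
#   fft = FFT(poly_a=[1, 2, 3], poly_b=[3, 4])
#   fft.product  ->  coefficients of 3 + 10x + 17x^2 + 12x^3, stored as complex
#                    numbers with (near-)zero imaginary parts after rounding.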
# Unit tests
if __name__ == "__main__":
import doctest
doctest.testmod()
| 18 |
'''simple docstring'''
import re

from filelock import FileLock


try:
    import nltk

    NLTK_AVAILABLE = True
except (ImportError, ModuleNotFoundError):
    NLTK_AVAILABLE = False

if NLTK_AVAILABLE:
    with FileLock(".lock") as lock:
        nltk.download("punkt", quiet=True)


def add_newline_to_end_of_each_sentence(x: str) -> str:
    """Split the input into sentences with nltk and rejoin them separated by newlines."""
    x = re.sub("<n>", "", x)  # remove pegasus newline char
    assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)"
    return "\n".join(nltk.sent_tokenize(x))
| 22 | 0 |
import math
import torch
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from .attention_processor import Attention
from .embeddings import get_timestep_embedding
from .modeling_utils import ModelMixin
class T5FilmDecoder(ModelMixin, ConfigMixin):
    @register_to_config
    def __init__(
        self,
        input_dims: int = 128,
        targets_length: int = 256,
        max_decoder_noise_time: float = 2000.0,
        d_model: int = 768,
        num_layers: int = 12,
        num_heads: int = 12,
        d_kv: int = 64,
        d_ff: int = 2048,
        dropout_rate: float = 0.1,
    ):
        super().__init__()

        self.conditioning_emb = nn.Sequential(
            nn.Linear(d_model, d_model * 4, bias=False), nn.SiLU(), nn.Linear(d_model * 4, d_model * 4, bias=False), nn.SiLU(), )

        self.position_encoding = nn.Embedding(targets_length, d_model)
        self.position_encoding.weight.requires_grad = False

        self.continuous_inputs_projection = nn.Linear(input_dims, d_model, bias=False)

        self.dropout = nn.Dropout(p=dropout_rate)

        self.decoders = nn.ModuleList()
        for lyr_num in range(num_layers):
            # FiLM conditional T5 decoder
            lyr = DecoderLayer(d_model=d_model, d_kv=d_kv, num_heads=num_heads, d_ff=d_ff, dropout_rate=dropout_rate)
            self.decoders.append(lyr)

        self.decoder_norm = T5LayerNorm(d_model)

        self.post_dropout = nn.Dropout(p=dropout_rate)
        self.spec_out = nn.Linear(d_model, input_dims, bias=False)

    def encoder_decoder_mask(self, query_input, key_input):
        mask = torch.mul(query_input.unsqueeze(-1), key_input.unsqueeze(-2))
        return mask.unsqueeze(-3)

    def forward(self, encodings_and_masks, decoder_input_tokens, decoder_noise_time):
        batch, _, _ = decoder_input_tokens.shape
        assert decoder_noise_time.shape == (batch,)

        # decoder_noise_time is in [0, 1), so rescale to expected timing range.
        time_steps = get_timestep_embedding(
            decoder_noise_time * self.config.max_decoder_noise_time, embedding_dim=self.config.d_model, max_period=self.config.max_decoder_noise_time, ).to(dtype=self.dtype)

        conditioning_emb = self.conditioning_emb(time_steps).unsqueeze(1)

        assert conditioning_emb.shape == (batch, 1, self.config.d_model * 4)

        seq_length = decoder_input_tokens.shape[1]

        # If we want to use relative positions for audio context, we can just offset
        # this sequence by the length of encodings_and_masks.
        decoder_positions = torch.broadcast_to(
            torch.arange(seq_length, device=decoder_input_tokens.device), (batch, seq_length), )

        position_encodings = self.position_encoding(decoder_positions)

        inputs = self.continuous_inputs_projection(decoder_input_tokens)
        inputs += position_encodings
        y = self.dropout(inputs)

        # decoder: No padding present.
        decoder_mask = torch.ones(
            decoder_input_tokens.shape[:2], device=decoder_input_tokens.device, dtype=inputs.dtype)

        # Translate encoding masks to encoder-decoder masks.
        encodings_and_encdec_masks = [(x, self.encoder_decoder_mask(decoder_mask, y)) for x, y in encodings_and_masks]

        # cross attend style: concat encodings
        encoded = torch.cat([x[0] for x in encodings_and_encdec_masks], dim=1)
        encoder_decoder_mask = torch.cat([x[1] for x in encodings_and_encdec_masks], dim=-1)

        for lyr in self.decoders:
            y = lyr(
                y, conditioning_emb=conditioning_emb, encoder_hidden_states=encoded, encoder_attention_mask=encoder_decoder_mask, )[0]

        y = self.decoder_norm(y)
        y = self.post_dropout(y)

        spec_out = self.spec_out(y)
        return spec_out


class DecoderLayer(nn.Module):
    def __init__(self, d_model, d_kv, num_heads, d_ff, dropout_rate, layer_norm_epsilon=1e-6):
        super().__init__()
        self.layer = nn.ModuleList()

        # cond self attention: layer 0
        self.layer.append(
            T5LayerSelfAttentionCond(d_model=d_model, d_kv=d_kv, num_heads=num_heads, dropout_rate=dropout_rate))

        # cross attention: layer 1
        self.layer.append(
            T5LayerCrossAttention(
                d_model=d_model, d_kv=d_kv, num_heads=num_heads, dropout_rate=dropout_rate, layer_norm_epsilon=layer_norm_epsilon, ))

        # Film Cond MLP + dropout: last layer
        self.layer.append(
            T5LayerFFCond(d_model=d_model, d_ff=d_ff, dropout_rate=dropout_rate, layer_norm_epsilon=layer_norm_epsilon))

    def forward(self, hidden_states, conditioning_emb=None, attention_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, encoder_decoder_position_bias=None, ):
        hidden_states = self.layer[0](
            hidden_states, conditioning_emb=conditioning_emb, attention_mask=attention_mask, )

        if encoder_hidden_states is not None:
            encoder_extended_attention_mask = torch.where(encoder_attention_mask > 0, 0, -1e10).to(
                encoder_hidden_states.dtype)

            hidden_states = self.layer[1](
                hidden_states, key_value_states=encoder_hidden_states, attention_mask=encoder_extended_attention_mask, )

        # Apply Film Conditional Feed Forward layer
        hidden_states = self.layer[-1](hidden_states, conditioning_emb)

        return (hidden_states,)


class T5LayerSelfAttentionCond(nn.Module):
    def __init__(self, d_model, d_kv, num_heads, dropout_rate):
        super().__init__()
        self.layer_norm = T5LayerNorm(d_model)
        self.FiLMLayer = T5FiLMLayer(in_features=d_model * 4, out_features=d_model)
        self.attention = Attention(query_dim=d_model, heads=num_heads, dim_head=d_kv, out_bias=False, scale_qk=False)
        self.dropout = nn.Dropout(dropout_rate)

    def forward(self, hidden_states, conditioning_emb=None, attention_mask=None, ):
        # pre_self_attention_layer_norm
        normed_hidden_states = self.layer_norm(hidden_states)

        if conditioning_emb is not None:
            normed_hidden_states = self.FiLMLayer(normed_hidden_states, conditioning_emb)

        # Self-attention block
        attention_output = self.attention(normed_hidden_states)

        hidden_states = hidden_states + self.dropout(attention_output)

        return hidden_states


class T5LayerCrossAttention(nn.Module):
    def __init__(self, d_model, d_kv, num_heads, dropout_rate, layer_norm_epsilon):
        super().__init__()
        self.attention = Attention(query_dim=d_model, heads=num_heads, dim_head=d_kv, out_bias=False, scale_qk=False)
        self.layer_norm = T5LayerNorm(d_model, eps=layer_norm_epsilon)
        self.dropout = nn.Dropout(dropout_rate)

    def forward(self, hidden_states, key_value_states=None, attention_mask=None, ):
        normed_hidden_states = self.layer_norm(hidden_states)
        attention_output = self.attention(
            normed_hidden_states, encoder_hidden_states=key_value_states, attention_mask=attention_mask.squeeze(1), )
        layer_output = hidden_states + self.dropout(attention_output)
        return layer_output


class T5LayerFFCond(nn.Module):
    def __init__(self, d_model, d_ff, dropout_rate, layer_norm_epsilon):
        super().__init__()
        self.DenseReluDense = T5DenseGatedActDense(d_model=d_model, d_ff=d_ff, dropout_rate=dropout_rate)
        self.film = T5FiLMLayer(in_features=d_model * 4, out_features=d_model)
        self.layer_norm = T5LayerNorm(d_model, eps=layer_norm_epsilon)
        self.dropout = nn.Dropout(dropout_rate)

    def forward(self, hidden_states, conditioning_emb=None):
        forwarded_states = self.layer_norm(hidden_states)
        if conditioning_emb is not None:
            forwarded_states = self.film(forwarded_states, conditioning_emb)

        forwarded_states = self.DenseReluDense(forwarded_states)
        hidden_states = hidden_states + self.dropout(forwarded_states)
        return hidden_states


class T5DenseGatedActDense(nn.Module):
    def __init__(self, d_model, d_ff, dropout_rate):
        super().__init__()
        self.wi_0 = nn.Linear(d_model, d_ff, bias=False)
        self.wi_1 = nn.Linear(d_model, d_ff, bias=False)
        self.wo = nn.Linear(d_ff, d_model, bias=False)
        self.dropout = nn.Dropout(dropout_rate)
        self.act = NewGELUActivation()

    def forward(self, hidden_states):
        hidden_gelu = self.act(self.wi_0(hidden_states))
        hidden_linear = self.wi_1(hidden_states)
        hidden_states = hidden_gelu * hidden_linear
        hidden_states = self.dropout(hidden_states)

        hidden_states = self.wo(hidden_states)
        return hidden_states


class T5LayerNorm(nn.Module):
    def __init__(self, hidden_size, eps=1e-6):
        super().__init__()
        self.weight = nn.Parameter(torch.ones(hidden_size))
        self.variance_epsilon = eps

    def forward(self, hidden_states):
        # T5 uses a layer_norm which only scales and doesn't shift, which is also known as Root Mean
        # Square Layer Normalization https://arxiv.org/abs/1910.07467 thus variance is calculated
        # w/o mean and there is no bias. Additionally we want to make sure that the accumulation for
        # half-precision inputs is done in fp32
        variance = hidden_states.to(torch.float32).pow(2).mean(-1, keepdim=True)
        hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)

        # convert into half-precision if necessary
        if self.weight.dtype in [torch.float16, torch.bfloat16]:
            hidden_states = hidden_states.to(self.weight.dtype)

        return self.weight * hidden_states
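# Numeric sketch of the RMS norm above (weight initialised to ones, eps ignored):
#   x = [3.0, 4.0]  ->  rms = sqrt(mean(x**2)) = sqrt(12.5) ~= 3.5355
#   y = x / rms ~= [0.8485, 1.1314]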
class NewGELUActivation(nn.Module):
    def forward(self, input: torch.Tensor) -> torch.Tensor:
        return 0.5 * input * (1.0 + torch.tanh(math.sqrt(2.0 / math.pi) * (input + 0.044715 * torch.pow(input, 3.0))))


class T5FiLMLayer(nn.Module):
    def __init__(self, in_features, out_features):
        super().__init__()
        self.scale_bias = nn.Linear(in_features, out_features * 2, bias=False)

    def forward(self, x, conditioning_emb):
        emb = self.scale_bias(conditioning_emb)
        scale, shift = torch.chunk(emb, 2, -1)
        x = x * (1 + scale) + shift
        return x
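# FiLM sketch: `scale_bias` maps the conditioning embedding to per-channel
# (scale, shift) pairs applied as x * (1 + scale) + shift, so a zero conditioning
# signal (scale = shift = 0) leaves the features unchanged.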
| 19 |
'''simple docstring'''
import inspect
import unittest
import numpy as np
from tests.test_modeling_common import floats_tensor
from transformers import DetrConfig, MaskFormerConfig, SwinConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaskFormerForInstanceSegmentation, MaskFormerModel
if is_vision_available():
from transformers import MaskFormerImageProcessor
if is_vision_available():
from PIL import Image
class MaskFormerModelTester:
    def __init__(
        self,
        parent,
        batch_size=2,
        is_training=True,
        use_auxiliary_loss=False,
        num_queries=10,
        num_channels=3,
        min_size=32 * 4,
        max_size=32 * 6,
        num_labels=4,
        mask_feature_size=32,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.is_training = is_training
        self.use_auxiliary_loss = use_auxiliary_loss
        self.num_queries = num_queries
        self.num_channels = num_channels
        self.min_size = min_size
        self.max_size = max_size
        self.num_labels = num_labels
        self.mask_feature_size = mask_feature_size

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size]).to(
            torch_device)

        pixel_mask = torch.ones([self.batch_size, self.min_size, self.max_size], device=torch_device)

        mask_labels = (
            torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size], device=torch_device) > 0.5
        ).float()
        class_labels = (torch.rand((self.batch_size, self.num_labels), device=torch_device) > 0.5).long()

        config = self.get_config()
        return config, pixel_values, pixel_mask, mask_labels, class_labels

    def get_config(self):
        return MaskFormerConfig.from_backbone_and_decoder_configs(
            backbone_config=SwinConfig(
                depths=[1, 1, 1, 1], ), decoder_config=DetrConfig(
                decoder_ffn_dim=128, num_queries=self.num_queries, decoder_attention_heads=2, d_model=self.mask_feature_size, ), mask_feature_size=self.mask_feature_size, fpn_feature_size=self.mask_feature_size, num_channels=self.num_channels, num_labels=self.num_labels, )

    def prepare_config_and_inputs_for_common(self):
        config, pixel_values, pixel_mask, _, _ = self.prepare_config_and_inputs()
        inputs_dict = {"pixel_values": pixel_values, "pixel_mask": pixel_mask}
        return config, inputs_dict

    def check_output_hidden_state(self, output, config):
        encoder_hidden_states = output.encoder_hidden_states
        pixel_decoder_hidden_states = output.pixel_decoder_hidden_states
        transformer_decoder_hidden_states = output.transformer_decoder_hidden_states

        self.parent.assertTrue(len(encoder_hidden_states), len(config.backbone_config.depths))
        self.parent.assertTrue(len(pixel_decoder_hidden_states), len(config.backbone_config.depths))
        self.parent.assertTrue(len(transformer_decoder_hidden_states), config.decoder_config.decoder_layers)

    def create_and_check_maskformer_model(self, config, pixel_values, pixel_mask, output_hidden_states=False):
        with torch.no_grad():
            model = MaskFormerModel(config=config)
            model.to(torch_device)
            model.eval()

            output = model(pixel_values=pixel_values, pixel_mask=pixel_mask)
            output = model(pixel_values, output_hidden_states=True)
        # the correct shape of output.transformer_decoder_hidden_states ensure the correcteness of the
        # encoder and pixel decoder
        self.parent.assertEqual(
            output.transformer_decoder_last_hidden_state.shape, (self.batch_size, self.num_queries, self.mask_feature_size), )
        # let's ensure the other two hidden state exists
        self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None)
        self.parent.assertTrue(output.encoder_last_hidden_state is not None)

        if output_hidden_states:
            self.check_output_hidden_state(output, config)

    def create_and_check_maskformer_instance_segmentation_head_model(
        self, config, pixel_values, pixel_mask, mask_labels, class_labels
    ):
        model = MaskFormerForInstanceSegmentation(config=config)
        model.to(torch_device)
        model.eval()

        def comm_check_on_output(result):
            # let's still check that all the required stuff is there
            self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None)
            self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None)
            self.parent.assertTrue(result.encoder_last_hidden_state is not None)
            # okay, now we need to check the logits shape
            # due to the encoder compression, masks have a //4 spatial size
            self.parent.assertEqual(
                result.masks_queries_logits.shape, (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4), )
            # + 1 for null class
            self.parent.assertEqual(
                result.class_queries_logits.shape, (self.batch_size, self.num_queries, self.num_labels + 1))

        with torch.no_grad():
            result = model(pixel_values=pixel_values, pixel_mask=pixel_mask)
            result = model(pixel_values)

            comm_check_on_output(result)

            result = model(
                pixel_values=pixel_values, pixel_mask=pixel_mask, mask_labels=mask_labels, class_labels=class_labels)

        comm_check_on_output(result)

        self.parent.assertTrue(result.loss is not None)
        self.parent.assertEqual(result.loss.shape, torch.Size([1]))
@require_torch
class MaskFormerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (MaskFormerModel, MaskFormerForInstanceSegmentation) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": MaskFormerModel, "image-segmentation": MaskFormerForInstanceSegmentation}
        if is_torch_available()
        else {}
    )

    is_encoder_decoder = False
    test_pruning = False
    test_head_masking = False
    test_attention_outputs = False

    def setUp(self):
        self.model_tester = MaskFormerModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MaskFormerConfig, has_text_modality=False)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_maskformer_model(self):
        config, inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.create_and_check_maskformer_model(config, **inputs, output_hidden_states=False)

    def test_maskformer_instance_segmentation_head_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_maskformer_instance_segmentation_head_model(*config_and_inputs)

    @unittest.skip(reason="MaskFormer does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="MaskFormer does not have a get_input_embeddings method")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="MaskFormer is not a generative model")
    def test_generate_without_input_ids(self):
        pass

    @unittest.skip(reason="MaskFormer does not use token embeddings")
    def test_resize_tokens_embeddings(self):
        pass

    @require_torch_multi_gpu
    @unittest.skip(
        reason="MaskFormer has some layers using `add_module` which doesn't work well with `nn.DataParallel`")
    def test_multi_gpu_data_parallel_forward(self):
        pass

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    @slow
    def test_model_from_pretrained(self):
        for model_name in ["facebook/maskformer-swin-small-coco"]:
            model = MaskFormerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_model_with_labels(self):
        size = (self.model_tester.min_size,) * 2
        inputs = {
            "pixel_values": torch.randn((2, 3, *size), device=torch_device),
            "mask_labels": torch.randn((2, 10, *size), device=torch_device),
            "class_labels": torch.zeros(2, 10, device=torch_device).long(),
        }

        model = MaskFormerForInstanceSegmentation(MaskFormerConfig()).to(torch_device)
        outputs = model(**inputs)
        self.assertTrue(outputs.loss is not None)

    def test_hidden_states_output(self):
        config, inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.create_and_check_maskformer_model(config, **inputs, output_hidden_states=True)

    def test_attention_outputs(self):
        config, inputs = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config).to(torch_device)
            outputs = model(**inputs, output_attentions=True)
            self.assertTrue(outputs.attentions is not None)

    def test_training(self):
        if not self.model_tester.is_training:
            return
        # only MaskFormerForInstanceSegmentation has the loss
        model_class = self.all_model_classes[1]
        config, pixel_values, pixel_mask, mask_labels, class_labels = self.model_tester.prepare_config_and_inputs()

        model = model_class(config)
        model.to(torch_device)
        model.train()

        loss = model(pixel_values, mask_labels=mask_labels, class_labels=class_labels).loss
        loss.backward()

    def test_retain_grad_hidden_states_attentions(self):
        # only MaskFormerForInstanceSegmentation has the loss
        model_class = self.all_model_classes[1]
        config, pixel_values, pixel_mask, mask_labels, class_labels = self.model_tester.prepare_config_and_inputs()
        config.output_hidden_states = True
        config.output_attentions = True

        model = model_class(config)
        model.to(torch_device)
        model.train()

        outputs = model(pixel_values, mask_labels=mask_labels, class_labels=class_labels)

        encoder_hidden_states = outputs.encoder_hidden_states[0]
        encoder_hidden_states.retain_grad()

        pixel_decoder_hidden_states = outputs.pixel_decoder_hidden_states[0]
        pixel_decoder_hidden_states.retain_grad()
        # we requires_grad=True in inputs_embeds (line 2152), the original implementation don't
        transformer_decoder_hidden_states = outputs.transformer_decoder_hidden_states[0]
        transformer_decoder_hidden_states.retain_grad()

        attentions = outputs.attentions[0]
        attentions.retain_grad()

        outputs.loss.backward(retain_graph=True)

        self.assertIsNotNone(encoder_hidden_states.grad)
        self.assertIsNotNone(pixel_decoder_hidden_states.grad)
        self.assertIsNotNone(transformer_decoder_hidden_states.grad)
        self.assertIsNotNone(attentions.grad)
TOLERANCE = 1e-4


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_vision
@slow
class MaskFormerModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            MaskFormerImageProcessor.from_pretrained("facebook/maskformer-swin-small-coco")
            if is_vision_available()
            else None
        )

    def test_inference_no_head(self):
        model = MaskFormerModel.from_pretrained("facebook/maskformer-swin-small-coco").to(torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(image, return_tensors="pt").to(torch_device)
        inputs_shape = inputs["pixel_values"].shape
        # check size is divisible by 32
        self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0)
        # check size
        self.assertEqual(inputs_shape, (1, 3, 800, 1088))

        with torch.no_grad():
            outputs = model(**inputs)

        expected_slice_hidden_state = torch.tensor(
            [[-0.0482, 0.9228, 0.4951], [-0.2547, 0.8017, 0.8527], [-0.0069, 0.3385, -0.0089]]).to(torch_device)
        self.assertTrue(
            torch.allclose(
                outputs.encoder_last_hidden_state[0, 0, :3, :3], expected_slice_hidden_state, atol=TOLERANCE))

        expected_slice_hidden_state = torch.tensor(
            [[-0.8422, -0.8434, -0.9718], [-1.0144, -0.5565, -0.4195], [-1.0038, -0.4484, -0.1961]]).to(torch_device)
        self.assertTrue(
            torch.allclose(
                outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3], expected_slice_hidden_state, atol=TOLERANCE))

        expected_slice_hidden_state = torch.tensor(
            [[0.2852, -0.0159, 0.9735], [0.6254, 0.1858, 0.8529], [-0.0680, -0.4116, 1.8413]]).to(torch_device)
        self.assertTrue(
            torch.allclose(
                outputs.transformer_decoder_last_hidden_state[0, :3, :3], expected_slice_hidden_state, atol=TOLERANCE))

    def test_inference_instance_segmentation_head(self):
        model = (
            MaskFormerForInstanceSegmentation.from_pretrained("facebook/maskformer-swin-small-coco")
            .to(torch_device)
            .eval()
        )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(image, return_tensors="pt").to(torch_device)
        inputs_shape = inputs["pixel_values"].shape
        # check size is divisible by 32
        self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0)
        # check size
        self.assertEqual(inputs_shape, (1, 3, 800, 1088))

        with torch.no_grad():
            outputs = model(**inputs)

        # masks_queries_logits
        masks_queries_logits = outputs.masks_queries_logits
        self.assertEqual(
            masks_queries_logits.shape, (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4), )
        expected_slice = [
            [-1.3737124, -1.7724937, -1.9364233],
            [-1.5977281, -1.9867939, -2.1523695],
            [-1.5795398, -1.9269832, -2.093942],
        ]
        expected_slice = torch.tensor(expected_slice).to(torch_device)
        self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3], expected_slice, atol=TOLERANCE))

        # class_queries_logits
        class_queries_logits = outputs.class_queries_logits
        self.assertEqual(
            class_queries_logits.shape, (1, model.config.decoder_config.num_queries, model.config.num_labels + 1))
        expected_slice = torch.tensor(
            [
                [1.6512e00, -5.2572e00, -3.3519e00],
                [3.6169e-02, -5.9025e00, -2.9313e00],
                [1.0766e-04, -7.7630e00, -5.1263e00],
            ]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3], expected_slice, atol=TOLERANCE))

    def test_inference_instance_segmentation_head_resnet_backbone(self):
        model = (
            MaskFormerForInstanceSegmentation.from_pretrained("facebook/maskformer-resnet101-coco-stuff")
            .to(torch_device)
            .eval()
        )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(image, return_tensors="pt").to(torch_device)
        inputs_shape = inputs["pixel_values"].shape
        # check size is divisible by 32
        self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0)
        # check size
        self.assertEqual(inputs_shape, (1, 3, 800, 1088))

        with torch.no_grad():
            outputs = model(**inputs)

        # masks_queries_logits
        masks_queries_logits = outputs.masks_queries_logits
        self.assertEqual(
            masks_queries_logits.shape, (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4), )
        expected_slice = [[-0.9046, -2.6366, -4.6062], [-3.4179, -5.7890, -8.8057], [-4.9179, -7.6560, -10.7711]]
        expected_slice = torch.tensor(expected_slice).to(torch_device)
        self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3], expected_slice, atol=TOLERANCE))

        # class_queries_logits
        class_queries_logits = outputs.class_queries_logits
        self.assertEqual(
            class_queries_logits.shape, (1, model.config.decoder_config.num_queries, model.config.num_labels + 1))
        expected_slice = torch.tensor(
            [[4.7188, -3.2585, -2.8857], [6.6871, -2.9181, -1.2487], [7.2449, -2.2764, -2.1874]]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3], expected_slice, atol=TOLERANCE))

    def test_with_segmentation_maps_and_loss(self):
        model = (
            MaskFormerForInstanceSegmentation.from_pretrained("facebook/maskformer-swin-small-coco")
            .to(torch_device)
            .eval()
        )
        image_processor = self.default_image_processor

        inputs = image_processor(
            [np.zeros((3, 800, 1333)), np.zeros((3, 800, 1333))], segmentation_maps=[np.zeros((384, 384)).astype(np.float32), np.zeros((384, 384)).astype(np.float32)], return_tensors="pt", )

        inputs["pixel_values"] = inputs["pixel_values"].to(torch_device)
        inputs["mask_labels"] = [el.to(torch_device) for el in inputs["mask_labels"]]
        inputs["class_labels"] = [el.to(torch_device) for el in inputs["class_labels"]]

        with torch.no_grad():
            outputs = model(**inputs)

        self.assertTrue(outputs.loss is not None)
| 22 | 0 |
import argparse

from transformers import CLIPImageProcessor, CLIPVisionModelWithProjection

from diffusers import UnCLIPImageVariationPipeline, UnCLIPPipeline


if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.")

    parser.add_argument(
        "--txt2img_unclip",
        default="kakaobrain/karlo-v1-alpha",
        type=str,
        required=False,
        help="The pretrained txt2img unclip.",
    )

    args = parser.parse_args()

    txt2img = UnCLIPPipeline.from_pretrained(args.txt2img_unclip)

    feature_extractor = CLIPImageProcessor()
    image_encoder = CLIPVisionModelWithProjection.from_pretrained("openai/clip-vit-large-patch14")

    img2img = UnCLIPImageVariationPipeline(
        decoder=txt2img.decoder,
        text_encoder=txt2img.text_encoder,
        tokenizer=txt2img.tokenizer,
        text_proj=txt2img.text_proj,
        feature_extractor=feature_extractor,
        image_encoder=image_encoder,
        super_res_first=txt2img.super_res_first,
        super_res_last=txt2img.super_res_last,
        decoder_scheduler=txt2img.decoder_scheduler,
        super_res_scheduler=txt2img.super_res_scheduler,
    )

    img2img.save_pretrained(args.dump_path)
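# Example invocation (a minimal sketch; the script filename is an assumption):
#   python convert_unclip_txt2img_to_image_variation.py \
#       --dump_path ./karlo-image-variations \
#       --txt2img_unclip kakaobrain/karlo-v1-alpha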
| 20 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple

from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging


if is_sentencepiece_available():
    from .tokenization_albert import AlbertTokenizer
else:
    AlbertTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "albert-base-v1": "https://huggingface.co/albert-base-v1/resolve/main/spiece.model",
        "albert-large-v1": "https://huggingface.co/albert-large-v1/resolve/main/spiece.model",
        "albert-xlarge-v1": "https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model",
        "albert-xxlarge-v1": "https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model",
        "albert-base-v2": "https://huggingface.co/albert-base-v2/resolve/main/spiece.model",
        "albert-large-v2": "https://huggingface.co/albert-large-v2/resolve/main/spiece.model",
        "albert-xlarge-v2": "https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model",
        "albert-xxlarge-v2": "https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model",
    },
    "tokenizer_file": {
        "albert-base-v1": "https://huggingface.co/albert-base-v1/resolve/main/tokenizer.json",
        "albert-large-v1": "https://huggingface.co/albert-large-v1/resolve/main/tokenizer.json",
        "albert-xlarge-v1": "https://huggingface.co/albert-xlarge-v1/resolve/main/tokenizer.json",
        "albert-xxlarge-v1": "https://huggingface.co/albert-xxlarge-v1/resolve/main/tokenizer.json",
        "albert-base-v2": "https://huggingface.co/albert-base-v2/resolve/main/tokenizer.json",
        "albert-large-v2": "https://huggingface.co/albert-large-v2/resolve/main/tokenizer.json",
        "albert-xlarge-v2": "https://huggingface.co/albert-xlarge-v2/resolve/main/tokenizer.json",
        "albert-xxlarge-v2": "https://huggingface.co/albert-xxlarge-v2/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "albert-base-v1": 512,
    "albert-large-v1": 512,
    "albert-xlarge-v1": 512,
    "albert-xxlarge-v1": 512,
    "albert-base-v2": 512,
    "albert-large-v2": 512,
    "albert-xlarge-v2": 512,
    "albert-xxlarge-v2": 512,
}

SPIECE_UNDERLINE = "▁"


class AlbertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = AlbertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        remove_space=True,
        keep_accents=False,
        bos_token="[CLS]",
        eos_token="[SEP]",
        unk_token="<unk>",
        sep_token="[SEP]",
        pad_token="<pad>",
        cls_token="[CLS]",
        mask_token="[MASK]",
        **kwargs,
    ):
        # Mask token behaves like a normal word, i.e. include the space before it and
        # is included in the raw text, there should be a match in a non-normalized sentence.
        mask_token = (
            AddedToken(mask_token, lstrip=True, rstrip=False, normalized=False)
            if isinstance(mask_token, str)
            else mask_token
        )

        super().__init__(
            vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, remove_space=remove_space,
            keep_accents=keep_accents, bos_token=bos_token, eos_token=eos_token, unk_token=unk_token,
            sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, **kwargs,
        )

        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
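# Minimal usage sketch (loads the published checkpoint; encoding a pair exercises
# build_inputs_with_special_tokens and create_token_type_ids_from_sequences):
#   tok = AlbertTokenizerFast.from_pretrained("albert-base-v2")
#   enc = tok("first sentence", "second sentence")
#   print(enc["input_ids"], enc["token_type_ids"])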
| 22 | 0 |
import datasets
import faiss
import numpy as np
import streamlit as st
import torch
from elasticsearch import Elasticsearch
from eli5_utils import (
    embed_questions_for_retrieval,
    make_qa_s2s_model,
    qa_s2s_generate,
    query_es_index,
    query_qa_dense_index,
)

import transformers
from transformers import AutoModel, AutoModelForSeq2SeqLM, AutoTokenizer


MODEL_TYPE = "bart"
LOAD_DENSE_INDEX = True


@st.cache(allow_output_mutation=True)
def load_models():
    if LOAD_DENSE_INDEX:
        qar_tokenizer = AutoTokenizer.from_pretrained("yjernite/retribert-base-uncased")
        qar_model = AutoModel.from_pretrained("yjernite/retribert-base-uncased").to("cuda:0")
        qar_model = qar_model.eval()
    else:
        qar_tokenizer, qar_model = (None, None)
    if MODEL_TYPE == "bart":
        s2s_tokenizer = AutoTokenizer.from_pretrained("yjernite/bart_eli5")
        s2s_model = AutoModelForSeq2SeqLM.from_pretrained("yjernite/bart_eli5").to("cuda:0")
        save_dict = torch.load("seq2seq_models/eli5_bart_model_blm_2.pth")
        s2s_model.load_state_dict(save_dict["model"])
        s2s_model = s2s_model.eval()
    else:
        s2s_tokenizer, s2s_model = make_qa_s2s_model(
            model_name="t5-small", from_file="seq2seq_models/eli5_t5_model_1024_4.pth", device="cuda:0"
        )
    return (qar_tokenizer, qar_model, s2s_tokenizer, s2s_model)


@st.cache(allow_output_mutation=True)
def load_indexes():
    if LOAD_DENSE_INDEX:
        faiss_res = faiss.StandardGpuResources()
        wiki40b_passages = datasets.load_dataset(path="wiki_snippets", name="wiki40b_en_100_0")["train"]
        wiki40b_passage_reps = np.memmap(
            "wiki40b_passages_reps_32_l-8_h-768_b-512-512.dat",
            dtype="float32",
            mode="r",
            shape=(wiki40b_passages.num_rows, 128),
        )
        wiki40b_index_flat = faiss.IndexFlatIP(128)
        wiki40b_gpu_index_flat = faiss.index_cpu_to_gpu(faiss_res, 1, wiki40b_index_flat)
        wiki40b_gpu_index_flat.add(wiki40b_passage_reps)  # TODO fix for larger GPU
    else:
        wiki40b_passages, wiki40b_gpu_index_flat = (None, None)
    es_client = Elasticsearch([{"host": "localhost", "port": "9200"}])
    return (wiki40b_passages, wiki40b_gpu_index_flat, es_client)


@st.cache(allow_output_mutation=True)
def load_train_data():
    eli5 = datasets.load_dataset("eli5", name="LFQA_reddit")
    eli5_train = eli5["train_eli5"]
    eli5_train_q_reps = np.memmap(
        "eli5_questions_reps.dat", dtype="float32", mode="r", shape=(eli5_train.num_rows, 128)
    )
    eli5_train_q_index = faiss.IndexFlatIP(128)
    eli5_train_q_index.add(eli5_train_q_reps)
    return (eli5_train, eli5_train_q_index)


wiki40b_passages, wiki40b_gpu_index_flat, es_client = load_indexes()
qar_tokenizer, qar_model, s2s_tokenizer, s2s_model = load_models()
eli5_train, eli5_train_q_index = load_train_data()


def find_nearest_training(question, n_results=10):
    q_rep = embed_questions_for_retrieval([question], qar_tokenizer, qar_model)
    D, I = eli5_train_q_index.search(q_rep, n_results)
    nn_examples = [eli5_train[int(i)] for i in I[0]]
    return nn_examples


def make_support(question, source="wiki40b", method="dense", n_results=10):
    if source == "none":
        support_doc, hit_lst = (" <P> ".join(["" for _ in range(11)]).strip(), [])
    else:
        if method == "dense":
            support_doc, hit_lst = query_qa_dense_index(
                question, qar_model, qar_tokenizer, wiki40b_passages, wiki40b_gpu_index_flat, n_results
            )
        else:
            support_doc, hit_lst = query_es_index(
                question,
                es_client,
                index_name="english_wiki40b_snippets_100w",
                n_results=n_results,
            )
    support_list = [
        (res["article_title"], res["section_title"].strip(), res["score"], res["passage_text"]) for res in hit_lst
    ]
    question_doc = "question: {} context: {}".format(question, support_doc)
    return question_doc, support_list


@st.cache(
    hash_funcs={
        torch.Tensor: (lambda _: None),
        transformers.models.bart.tokenization_bart.BartTokenizer: (lambda _: None),
    }
)
def answer_question(
    question_doc, s2s_model, s2s_tokenizer, min_len=64, max_len=256, sampling=False, n_beams=2, top_p=0.95, temp=0.8
):
    with torch.no_grad():
        answer = qa_s2s_generate(
            question_doc,
            s2s_model,
            s2s_tokenizer,
            num_answers=1,
            num_beams=n_beams,
            min_len=min_len,
            max_len=max_len,
            do_sample=sampling,
            temp=temp,
            top_p=top_p,
            top_k=None,
            max_input_length=1024,
            device="cuda:0",
        )[0]
    return (answer, support_list)


st.title("Long Form Question Answering with ELI5")

# Start sidebar
header_html = "<img src='https://huggingface.co/front/assets/huggingface_logo.svg'>"
header_full = "\n<html>\n <head>\n <style>\n .img-container {\n padding-left: 90px;\n padding-right: 90px;\n padding-top: 50px;\n padding-bottom: 50px;\n background-color: #f0f3f9;\n }\n </style>\n </head>\n <body>\n <span class=\"img-container\"> <!-- Inline parent element -->\n %s\n </span>\n </body>\n</html>\n" % (
    header_html,
)
st.sidebar.markdown(
    header_full,
    unsafe_allow_html=True,
)

# Long Form QA with ELI5 and Wikipedia
description = "\nThis demo presents a model trained to [provide long-form answers to open-domain questions](https://yjernite.github.io/lfqa.html).\nFirst, a document retriever fetches a set of relevant Wikipedia passages given the question from the [Wiki40b](https://research.google/pubs/pub49029/) dataset,\na pre-processed fixed snapshot of Wikipedia.\n"
st.sidebar.markdown(description, unsafe_allow_html=True)

action_list = [
    "Answer the question",
    "View the retrieved document only",
    "View the most similar ELI5 question and answer",
    "Show me everything, please!",
]
demo_options = st.sidebar.checkbox("Demo options")
if demo_options:
    action_st = st.sidebar.selectbox(
        "",
        action_list,
        index=3,
    )
    action = action_list.index(action_st)
    show_type = st.sidebar.selectbox(
        "",
        ["Show full text of passages", "Show passage section titles"],
        index=0,
    )
    show_passages = show_type == "Show full text of passages"
else:
    action = 3
    show_passages = True

retrieval_options = st.sidebar.checkbox("Retrieval options")
if retrieval_options:
    retriever_info = "\n ### Information retriever options\n\n The **sparse** retriever uses ElasticSearch, while the **dense** retriever uses max-inner-product search between a question and passage embedding\n trained using the [ELI5](https://arxiv.org/abs/1907.09190) questions-answer pairs.\n The answer is then generated by sequence to sequence model which takes the question and retrieved document as input.\n "
    st.sidebar.markdown(retriever_info)
    wiki_source = st.sidebar.selectbox("Which Wikipedia format should the model use?", ["wiki40b", "none"])
    index_type = st.sidebar.selectbox("Which Wikipedia indexer should the model use?", ["dense", "sparse", "mixed"])
else:
    wiki_source = "wiki40b"
    index_type = "dense"

sampled = "beam"
n_beams = 2
min_len = 64
max_len = 256
top_p = None
temp = None
generate_options = st.sidebar.checkbox("Generation options")
if generate_options:
    generate_info = "\n ### Answer generation options\n\n The sequence-to-sequence model was initialized with [BART](https://huggingface.co/facebook/bart-large)\n weights and fine-tuned on the ELI5 QA pairs and retrieved documents. You can use the model for greedy decoding with\n **beam** search, or **sample** from the decoder's output probabilities.\n "
    st.sidebar.markdown(generate_info)
    sampled = st.sidebar.selectbox("Would you like to use beam search or sample an answer?", ["beam", "sampled"])
    min_len = st.sidebar.slider(
        "Minimum generation length", min_value=8, max_value=256, value=64, step=8, format=None, key=None
    )
    max_len = st.sidebar.slider(
        "Maximum generation length", min_value=64, max_value=512, value=256, step=16, format=None, key=None
    )
    if sampled == "beam":
        n_beams = st.sidebar.slider("Beam size", min_value=1, max_value=8, value=2, step=None, format=None, key=None)
    else:
        top_p = st.sidebar.slider(
            "Nucleus sampling p", min_value=0.1, max_value=1.0, value=0.95, step=0.01, format=None, key=None
        )
        temp = st.sidebar.slider(
            "Temperature", min_value=0.1, max_value=1.0, value=0.7, step=0.01, format=None, key=None
        )
        n_beams = None

# start main text
questions_list = [
    "<MY QUESTION>",
    "How do people make chocolate?",
    "Why do we get a fever when we are sick?",
    "How can different animals perceive different colors?",
    "What is natural language processing?",
    "What's the best way to treat a sunburn?",
    "What exactly are vitamins ?",
    "How does nuclear energy provide electricity?",
    "What's the difference between viruses and bacteria?",
    "Why are flutes classified as woodwinds when most of them are made out of metal ?",
    "Why do people like drinking coffee even though it tastes so bad?",
    "What happens when wine ages? How does it make the wine taste better?",
    "If an animal is an herbivore, where does it get the protein that it needs to survive if it only eats grass?",
    "How can we set a date to the beginning or end of an artistic period? Doesn't the change happen gradually?",
    "How does New Zealand have so many large bird predators?",
]
question_s = st.selectbox(
    "What would you like to ask? ---- select <MY QUESTION> to enter a new query",
    questions_list,
    index=1,
)
if question_s == "<MY QUESTION>":
    question = st.text_input("Enter your question here:", "")
else:
    question = question_s

if st.button("Show me!"):
    if action in [0, 1, 3]:
        if index_type == "mixed":
            _, support_list_dense = make_support(question, source=wiki_source, method="dense", n_results=10)
            _, support_list_sparse = make_support(question, source=wiki_source, method="sparse", n_results=10)
            support_list = []
            for res_d, res_s in zip(support_list_dense, support_list_sparse):
                if tuple(res_d) not in support_list:
                    support_list += [tuple(res_d)]
                if tuple(res_s) not in support_list:
                    support_list += [tuple(res_s)]
            support_list = support_list[:10]
            question_doc = "<P> " + " <P> ".join([res[-1] for res in support_list])
        else:
            question_doc, support_list = make_support(question, source=wiki_source, method=index_type, n_results=10)
    if action in [0, 3]:
        answer, support_list = answer_question(
            question_doc,
            s2s_model,
            s2s_tokenizer,
            min_len=min_len,
            max_len=int(max_len),
            sampling=(sampled == "sampled"),
            n_beams=n_beams,
            top_p=top_p,
            temp=temp,
        )
        st.markdown("### The model generated answer is:")
        st.write(answer)
    if action in [0, 1, 3] and wiki_source != "none":
        st.markdown("--- \n ### The model is drawing information from the following Wikipedia passages:")
        for i, res in enumerate(support_list):
            wiki_url = "https://en.wikipedia.org/wiki/{}".format(res[0].replace(" ", "_"))
            sec_titles = res[1].strip()
            if sec_titles == "":
                sections = "[{}]({})".format(res[0], wiki_url)
            else:
                sec_list = sec_titles.split(" & ")
                sections = " & ".join(
                    ["[{}]({}#{})".format(sec.strip(), wiki_url, sec.strip().replace(" ", "_")) for sec in sec_list]
                )
            st.markdown(
                "{0:02d} - **Article**: {1:<18} <br>  _Section_: {2}".format(i + 1, res[0], sections),
                unsafe_allow_html=True,
            )
            if show_passages:
                st.write(
                    "> <span style=\"font-family:arial; font-size:10pt;\">" + res[-1] + "</span>", unsafe_allow_html=True
                )
    if action in [2, 3]:
        nn_train_list = find_nearest_training(question)
        train_exple = nn_train_list[0]
        st.markdown(
            "--- \n ### The most similar question in the ELI5 training set was: \n\n {}".format(train_exple["title"])
        )
        answers_st = [
            "{}. {}".format(i + 1, " \n".join([line.strip() for line in ans.split("\n") if line.strip() != ""]))
            for i, (ans, sc) in enumerate(zip(train_exple["answers"]["text"], train_exple["answers"]["score"]))
            if i == 0 or sc > 2
        ]
        st.markdown("##### Its answers were: \n\n {}".format("\n".join(answers_st)))

disclaimer = "\n---\n\n**Disclaimer**\n\n*The intent of this app is to provide some (hopefully entertaining) insights into the behavior of a current LFQA system.\nEvaluating biases of such a model and ensuring factual generations are still very much open research problems.\nTherefore, until some significant progress is achieved, we caution against using the generated answers for practical purposes.*\n"
st.sidebar.markdown(disclaimer, unsafe_allow_html=True)
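# To launch the demo locally (assumes the index/model files referenced above exist,
# and that the script is saved under this name):
#   streamlit run eli5_app.py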
| 21 |
from collections import OrderedDict
from typing import Any, Mapping, Optional, Union

from ...configuration_utils import PretrainedConfig
from ...feature_extraction_utils import FeatureExtractionMixin
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import TensorType, logging


logger = logging.get_logger(__name__)

PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "deepmind/language-perceiver": "https://huggingface.co/deepmind/language-perceiver/resolve/main/config.json",
    # See all Perceiver models at https://huggingface.co/models?filter=perceiver
}


class PerceiverConfig(PretrainedConfig):
    model_type = "perceiver"

    def __init__(
        self,
        num_latents=256,
        d_latents=1280,
        d_model=768,
        num_blocks=1,
        num_self_attends_per_block=26,
        num_self_attention_heads=8,
        num_cross_attention_heads=8,
        qk_channels=None,
        v_channels=None,
        cross_attention_shape_for_attention="kv",
        self_attention_widening_factor=1,
        cross_attention_widening_factor=1,
        hidden_act="gelu",
        attention_probs_dropout_prob=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        use_query_residual=True,
        vocab_size=262,
        max_position_embeddings=2048,
        image_size=56,
        train_size=[368, 496],
        num_frames=16,
        audio_samples_per_frame=1920,
        samples_per_patch=16,
        output_shape=[1, 16, 224, 224],
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.num_latents = num_latents
        self.d_latents = d_latents
        self.d_model = d_model
        self.num_blocks = num_blocks
        self.num_self_attends_per_block = num_self_attends_per_block
        self.num_self_attention_heads = num_self_attention_heads
        self.num_cross_attention_heads = num_cross_attention_heads
        self.qk_channels = qk_channels
        self.v_channels = v_channels
        self.cross_attention_shape_for_attention = cross_attention_shape_for_attention
        self.self_attention_widening_factor = self_attention_widening_factor
        self.cross_attention_widening_factor = cross_attention_widening_factor
        self.hidden_act = hidden_act
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_query_residual = use_query_residual
        # masked language modeling attributes
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        # image classification attributes
        self.image_size = image_size
        # flow attributes
        self.train_size = train_size
        # multimodal autoencoding attributes
        self.num_frames = num_frames
        self.audio_samples_per_frame = audio_samples_per_frame
        self.samples_per_patch = samples_per_patch
        self.output_shape = output_shape


class PerceiverOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("inputs", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4

    def generate_dummy_inputs(
        self,
        preprocessor: Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"],
        batch_size: int = -1,
        seq_length: int = -1,
        num_choices: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
        num_channels: int = 3,
        image_width: int = 40,
        image_height: int = 40,
    ) -> Mapping[str, Any]:
        # copied from `transformers.onnx.config.OnnxConfig` and slightly altered/simplified
        if isinstance(preprocessor, PreTrainedTokenizerBase):
            # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
            batch_size = compute_effective_axis_dimension(
                batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0
            )
            # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
            token_to_add = preprocessor.num_special_tokens_to_add(is_pair)
            seq_length = compute_effective_axis_dimension(
                seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add
            )
            # Generate dummy inputs according to compute batch and sequence
            dummy_input = [" ".join(["a"]) * seq_length] * batch_size
            inputs = dict(preprocessor(dummy_input, return_tensors=framework))
            inputs["inputs"] = inputs.pop("input_ids")
            return inputs
        elif isinstance(preprocessor, FeatureExtractionMixin) and preprocessor.model_input_names[0] == "pixel_values":
            # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
            batch_size = compute_effective_axis_dimension(batch_size, fixed_dimension=OnnxConfig.default_fixed_batch)
            dummy_input = self._generate_dummy_images(batch_size, num_channels, image_height, image_width)
            inputs = dict(preprocessor(images=dummy_input, return_tensors=framework))
            inputs["inputs"] = inputs.pop("pixel_values")
            return inputs
        else:
            raise ValueError(
                "Unable to generate dummy inputs for the model. Please provide a tokenizer or a preprocessor."
            )
| 22 | 0 |
import argparse

import numpy as np
import torch

from transformers import SpeechT5HifiGan, SpeechT5HifiGanConfig, logging


logging.set_verbosity_info()
logger = logging.get_logger("transformers.models.speecht5")


def load_weights(checkpoint, hf_model, config):
    # Note: the target attribute names (conv_pre / upsampler / resblocks / conv_post)
    # follow the SpeechT5HifiGan module layout.
    hf_model.apply_weight_norm()

    hf_model.conv_pre.weight_g.data = checkpoint["input_conv.weight_g"]
    hf_model.conv_pre.weight_v.data = checkpoint["input_conv.weight_v"]
    hf_model.conv_pre.bias.data = checkpoint["input_conv.bias"]

    for i in range(len(config.upsample_rates)):
        hf_model.upsampler[i].weight_g.data = checkpoint[f"upsamples.{i}.1.weight_g"]
        hf_model.upsampler[i].weight_v.data = checkpoint[f"upsamples.{i}.1.weight_v"]
        hf_model.upsampler[i].bias.data = checkpoint[f"upsamples.{i}.1.bias"]

    for i in range(len(config.upsample_rates) * len(config.resblock_kernel_sizes)):
        for j in range(len(config.resblock_dilation_sizes)):
            hf_model.resblocks[i].convs1[j].weight_g.data = checkpoint[f"blocks.{i}.convs1.{j}.1.weight_g"]
            hf_model.resblocks[i].convs1[j].weight_v.data = checkpoint[f"blocks.{i}.convs1.{j}.1.weight_v"]
            hf_model.resblocks[i].convs1[j].bias.data = checkpoint[f"blocks.{i}.convs1.{j}.1.bias"]

            hf_model.resblocks[i].convs2[j].weight_g.data = checkpoint[f"blocks.{i}.convs2.{j}.1.weight_g"]
            hf_model.resblocks[i].convs2[j].weight_v.data = checkpoint[f"blocks.{i}.convs2.{j}.1.weight_v"]
            hf_model.resblocks[i].convs2[j].bias.data = checkpoint[f"blocks.{i}.convs2.{j}.1.bias"]

    hf_model.conv_post.weight_g.data = checkpoint["output_conv.1.weight_g"]
    hf_model.conv_post.weight_v.data = checkpoint["output_conv.1.weight_v"]
    hf_model.conv_post.bias.data = checkpoint["output_conv.1.bias"]

    hf_model.remove_weight_norm()


@torch.no_grad()
def convert_hifigan_checkpoint(
    checkpoint_path,
    stats_path,
    pytorch_dump_folder_path,
    config_path=None,
    repo_id=None,
):
    if config_path is not None:
        config = SpeechT5HifiGanConfig.from_pretrained(config_path)
    else:
        config = SpeechT5HifiGanConfig()

    model = SpeechT5HifiGan(config)

    orig_checkpoint = torch.load(checkpoint_path)
    load_weights(orig_checkpoint["model"]["generator"], model, config)

    stats = np.load(stats_path)
    mean = stats[0].reshape(-1)
    scale = stats[1].reshape(-1)
    model.mean = torch.from_numpy(mean).float()
    model.scale = torch.from_numpy(scale).float()

    model.save_pretrained(pytorch_dump_folder_path)

    if repo_id:
        print("Pushing to the hub...")
        model.push_to_hub(repo_id)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--checkpoint_path", required=True, default=None, type=str, help="Path to original checkpoint")
    parser.add_argument("--stats_path", required=True, default=None, type=str, help="Path to stats.npy file")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    parser.add_argument(
        "--pytorch_dump_folder_path", required=True, default=None, type=str, help="Path to the output PyTorch model."
    )
    parser.add_argument(
        "--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub."
    )

    args = parser.parse_args()
    convert_hifigan_checkpoint(
        args.checkpoint_path,
        args.stats_path,
        args.pytorch_dump_folder_path,
        args.config_path,
        args.push_to_hub,
    )
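# Example invocation (all paths are placeholders):
#   python convert_hifigan.py \
#       --checkpoint_path ./hifigan_generator.ckpt \
#       --stats_path ./stats.npy \
#       --pytorch_dump_folder_path ./speecht5_hifigan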
| 23 |
import warnings

from ...utils import is_sklearn_available, requires_backends


if is_sklearn_available():
    from scipy.stats import pearsonr, spearmanr
    from sklearn.metrics import f1_score, matthews_corrcoef


DEPRECATION_WARNING = (
    "This metric will be removed from the library soon, metrics should be handled with the 🤗 Evaluate "
    "library. You can have a look at this example script for pointers: "
    "https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py"
)


def simple_accuracy(preds, labels):
    warnings.warn(DEPRECATION_WARNING, FutureWarning)
    requires_backends(simple_accuracy, "sklearn")
    return (preds == labels).mean()


def acc_and_f1(preds, labels):
    warnings.warn(DEPRECATION_WARNING, FutureWarning)
    requires_backends(acc_and_f1, "sklearn")
    acc = simple_accuracy(preds, labels)
    f1 = f1_score(y_true=labels, y_pred=preds)
    return {
        "acc": acc,
        "f1": f1,
        "acc_and_f1": (acc + f1) / 2,
    }


def pearson_and_spearman(preds, labels):
    warnings.warn(DEPRECATION_WARNING, FutureWarning)
    requires_backends(pearson_and_spearman, "sklearn")
    pearson_corr = pearsonr(preds, labels)[0]
    spearman_corr = spearmanr(preds, labels)[0]
    return {
        "pearson": pearson_corr,
        "spearmanr": spearman_corr,
        "corr": (pearson_corr + spearman_corr) / 2,
    }


def glue_compute_metrics(task_name, preds, labels):
    warnings.warn(DEPRECATION_WARNING, FutureWarning)
    requires_backends(glue_compute_metrics, "sklearn")
    assert len(preds) == len(labels), f"Predictions and labels have mismatched lengths {len(preds)} and {len(labels)}"
    if task_name == "cola":
        return {"mcc": matthews_corrcoef(labels, preds)}
    elif task_name == "sst-2":
        return {"acc": simple_accuracy(preds, labels)}
    elif task_name == "mrpc":
        return acc_and_f1(preds, labels)
    elif task_name == "sts-b":
        return pearson_and_spearman(preds, labels)
    elif task_name == "qqp":
        return acc_and_f1(preds, labels)
    elif task_name == "mnli":
        return {"mnli/acc": simple_accuracy(preds, labels)}
    elif task_name == "mnli-mm":
        return {"mnli-mm/acc": simple_accuracy(preds, labels)}
    elif task_name == "qnli":
        return {"acc": simple_accuracy(preds, labels)}
    elif task_name == "rte":
        return {"acc": simple_accuracy(preds, labels)}
    elif task_name == "wnli":
        return {"acc": simple_accuracy(preds, labels)}
    elif task_name == "hans":
        return {"acc": simple_accuracy(preds, labels)}
    else:
        raise KeyError(task_name)


def xnli_compute_metrics(task_name, preds, labels):
    warnings.warn(DEPRECATION_WARNING, FutureWarning)
    requires_backends(xnli_compute_metrics, "sklearn")
    if len(preds) != len(labels):
        raise ValueError(f"Predictions and labels have mismatched lengths {len(preds)} and {len(labels)}")
    if task_name == "xnli":
        return {"acc": simple_accuracy(preds, labels)}
    else:
        raise KeyError(task_name)
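# Quick sketch of the GLUE helper on toy arrays:
#   import numpy as np
#   preds, labels = np.array([1, 0, 1, 1]), np.array([1, 0, 0, 1])
#   print(glue_compute_metrics("mrpc", preds, labels))  # accuracy, F1 and their average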
| 22 | 0 |
from __future__ import annotations

import math
from collections.abc import Callable


def line_length(
    fnc: Callable[[int | float], int | float],
    x_start: int | float,
    x_end: int | float,
    steps: int = 100,
) -> float:
    x1 = x_start
    fx1 = fnc(x_start)
    length = 0.0

    for _ in range(steps):
        # Approximates curve as a sequence of linear lines and sums their length
        x2 = (x_end - x_start) / steps + x1
        fx2 = fnc(x2)
        length += math.hypot(x2 - x1, fx2 - fx1)

        # Increment step
        x1 = x2
        fx1 = fx2

    return length


if __name__ == "__main__":

    def f(x):
        return math.sin(10 * x)

    print("f(x) = sin(10 * x)")
    print("The length of the curve from x = -10 to x = 10 is:")
    i = 10
    while i <= 100000:
        print(f"With {i} steps: {line_length(f, -10, 10, i)}")
        i *= 10
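# Sanity check: for a straight line f(x) = x the piecewise-linear approximation is
# exact for any step count, so the arc length over [0, 1] is sqrt(2):
#   print(line_length(lambda x: x, 0, 1, 10))  # -> 1.4142135623730951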
| 24 |
import argparse

from transformers import (
    TapasConfig,
    TapasForMaskedLM,
    TapasForQuestionAnswering,
    TapasForSequenceClassification,
    TapasModel,
    TapasTokenizer,
    load_tf_weights_in_tapas,
)
from transformers.utils import logging


logging.set_verbosity_info()


def convert_tf_checkpoint_to_pytorch(
    task, reset_position_index_per_cell, tf_checkpoint_path, tapas_config_file, pytorch_dump_path
):
    config = TapasConfig.from_json_file(tapas_config_file)
    # set absolute/relative position embeddings parameter
    config.reset_position_index_per_cell = reset_position_index_per_cell

    # set remaining parameters of TapasConfig as well as the model based on the task
    if task == "SQA":
        model = TapasForQuestionAnswering(config=config)
    elif task == "WTQ":
        # run_task_main.py hparams
        config.num_aggregation_labels = 4
        config.use_answer_as_supervision = True
        # hparam_utils.py hparams
        config.answer_loss_cutoff = 0.664694
        config.cell_selection_preference = 0.207951
        config.huber_loss_delta = 0.121194
        config.init_cell_selection_weights_to_zero = True
        config.select_one_column = True
        config.allow_empty_column_selection = False
        config.temperature = 0.0352513

        model = TapasForQuestionAnswering(config=config)
    elif task == "WIKISQL_SUPERVISED":
        # run_task_main.py hparams
        config.num_aggregation_labels = 4
        config.use_answer_as_supervision = False
        # hparam_utils.py hparams
        config.answer_loss_cutoff = 36.4519
        config.cell_selection_preference = 0.903421
        config.huber_loss_delta = 222.088
        config.init_cell_selection_weights_to_zero = True
        config.select_one_column = True
        config.allow_empty_column_selection = True
        config.temperature = 0.763141

        model = TapasForQuestionAnswering(config=config)
    elif task == "TABFACT":
        model = TapasForSequenceClassification(config=config)
    elif task == "MLM":
        model = TapasForMaskedLM(config=config)
    elif task == "INTERMEDIATE_PRETRAINING":
        model = TapasModel(config=config)
    else:
        raise ValueError(f"Task {task} not supported.")

    print(f"Building PyTorch model from configuration: {config}")
    # Load weights from tf checkpoint
    load_tf_weights_in_tapas(model, config, tf_checkpoint_path)

    # Save pytorch-model (weights and configuration)
    print(f"Save PyTorch model to {pytorch_dump_path}")
    model.save_pretrained(pytorch_dump_path)

    # Save tokenizer files
    print(f"Save tokenizer files to {pytorch_dump_path}")
    tokenizer = TapasTokenizer(vocab_file=tf_checkpoint_path[:-10] + "vocab.txt", model_max_length=512)
    tokenizer.save_pretrained(pytorch_dump_path)

    print("Used relative position embeddings:", model.config.reset_position_index_per_cell)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--task", default="SQA", type=str, help="Model task for which to convert a checkpoint. Defaults to SQA."
    )
    parser.add_argument(
        "--reset_position_index_per_cell",
        default=False,
        action="store_true",
        help="Whether to use relative position embeddings or not. Defaults to True.",
    )
    parser.add_argument(
        "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--tapas_config_file",
        default=None,
        type=str,
        required=True,
        help=(
            "The config json file corresponding to the pre-trained TAPAS model. \n"
            "This specifies the model architecture."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_tf_checkpoint_to_pytorch(
        args.task,
        args.reset_position_index_per_cell,
        args.tf_checkpoint_path,
        args.tapas_config_file,
        args.pytorch_dump_path,
    )
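# Example invocation (placeholder paths; note the vocab file is resolved by stripping
# the trailing "model.ckpt" from --tf_checkpoint_path):
#   python convert_tapas_original_tf_checkpoint_to_pytorch.py \
#       --task WTQ \
#       --tf_checkpoint_path ./tapas_wtq/model.ckpt \
#       --tapas_config_file ./tapas_wtq/config.json \
#       --pytorch_dump_path ./tapas_wtq_pt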
| 22 | 0 |
"""simple docstring"""
from ....configuration_utils import PretrainedConfig
from ....utils import logging
UpperCAmelCase__ : str = logging.get_logger(__name__)
UpperCAmelCase__ : int = {
'CarlCochet/trajectory-transformer-halfcheetah-medium-v2': (
'https://huggingface.co/CarlCochet/trajectory-transformer-halfcheetah-medium-v2/resolve/main/config.json'
),
# See all TrajectoryTransformer models at https://huggingface.co/models?filter=trajectory_transformer
}
class lowerCAmelCase_ (a__ ):
"""simple docstring"""
__UpperCamelCase : Optional[Any] = '''trajectory_transformer'''
__UpperCamelCase : str = ['''past_key_values''']
__UpperCamelCase : Optional[Any] = {
'''hidden_size''': '''n_embd''',
'''num_attention_heads''': '''n_head''',
'''num_hidden_layers''': '''n_layer''',
}
def __init__(self , SCREAMING_SNAKE_CASE__=1_00 , SCREAMING_SNAKE_CASE__=5 , SCREAMING_SNAKE_CASE__=1 , SCREAMING_SNAKE_CASE__=1 , SCREAMING_SNAKE_CASE__=2_49 , SCREAMING_SNAKE_CASE__=6 , SCREAMING_SNAKE_CASE__=17 , SCREAMING_SNAKE_CASE__=25 , SCREAMING_SNAKE_CASE__=4 , SCREAMING_SNAKE_CASE__=4 , SCREAMING_SNAKE_CASE__=1_28 , SCREAMING_SNAKE_CASE__=0.1 , SCREAMING_SNAKE_CASE__=0.1 , SCREAMING_SNAKE_CASE__=0.1 , SCREAMING_SNAKE_CASE__=0.0006 , SCREAMING_SNAKE_CASE__=5_12 , SCREAMING_SNAKE_CASE__=0.02 , SCREAMING_SNAKE_CASE__=1E-12 , SCREAMING_SNAKE_CASE__=1 , SCREAMING_SNAKE_CASE__=True , SCREAMING_SNAKE_CASE__=1 , SCREAMING_SNAKE_CASE__=5_02_56 , SCREAMING_SNAKE_CASE__=5_02_56 , **SCREAMING_SNAKE_CASE__ , ) -> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[int] = vocab_size
SCREAMING_SNAKE_CASE__ : Optional[Any] = action_weight
SCREAMING_SNAKE_CASE__ : Union[str, Any] = reward_weight
SCREAMING_SNAKE_CASE__ : List[Any] = value_weight
SCREAMING_SNAKE_CASE__ : Union[str, Any] = max_position_embeddings
SCREAMING_SNAKE_CASE__ : Dict = block_size
SCREAMING_SNAKE_CASE__ : Tuple = action_dim
SCREAMING_SNAKE_CASE__ : List[str] = observation_dim
SCREAMING_SNAKE_CASE__ : Union[str, Any] = transition_dim
SCREAMING_SNAKE_CASE__ : List[str] = learning_rate
SCREAMING_SNAKE_CASE__ : Dict = n_layer
SCREAMING_SNAKE_CASE__ : Dict = n_head
SCREAMING_SNAKE_CASE__ : Union[str, Any] = n_embd
SCREAMING_SNAKE_CASE__ : str = embd_pdrop
SCREAMING_SNAKE_CASE__ : int = attn_pdrop
SCREAMING_SNAKE_CASE__ : Any = resid_pdrop
SCREAMING_SNAKE_CASE__ : Optional[Any] = initializer_range
SCREAMING_SNAKE_CASE__ : str = layer_norm_eps
SCREAMING_SNAKE_CASE__ : Optional[int] = kaiming_initializer_range
SCREAMING_SNAKE_CASE__ : int = use_cache
super().__init__(pad_token_id=SCREAMING_SNAKE_CASE__ , bos_token_id=SCREAMING_SNAKE_CASE__ , eos_token_id=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
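# Usage sketch: build a default config and read the mapped attribute names
# (attribute_map routes the generic names to the GPT-style ones):
#   cfg = TrajectoryTransformerConfig()
#   print(cfg.hidden_size, cfg.num_attention_heads)  # 128 4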
| 25 |
import os
from datetime import datetime as dt

from github import Github


LABELS_TO_EXEMPT = [
    "good first issue",
    "feature request",
    "wip",
]


def main():
    g = Github(os.environ["GITHUB_TOKEN"])
    repo = g.get_repo("huggingface/accelerate")
    open_issues = repo.get_issues(state="open")

    for issue in open_issues:
        comments = sorted([comment for comment in issue.get_comments()], key=lambda i: i.created_at, reverse=True)
        last_comment = comments[0] if len(comments) > 0 else None
        current_time = dt.utcnow()
        days_since_updated = (current_time - issue.updated_at).days
        days_since_creation = (current_time - issue.created_at).days
        if (
            last_comment is not None
            and last_comment.user.login == "github-actions[bot]"
            and days_since_updated > 7
            and days_since_creation >= 30
            and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
        ):
            # Close issue since it has been 7 days of inactivity since bot mention.
            issue.edit(state="closed")
        elif (
            days_since_updated > 23
            and days_since_creation >= 30
            and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
        ):
            # Add stale comment
            issue.create_comment(
                "This issue has been automatically marked as stale because it has not had "
                "recent activity. If you think this still needs to be addressed "
                "please comment on this thread.\n\nPlease note that issues that do not follow the "
                "[contributing guidelines](https://github.com/huggingface/accelerate/blob/main/CONTRIBUTING.md) "
                "are likely to be ignored."
            )


if __name__ == "__main__":
    main()
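# Typically run on a schedule (e.g. a GitHub Actions cron job) with a token that can
# read issues and post comments; the script name is an assumption:
#   GITHUB_TOKEN=<token> python stale.py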
| 22 | 0 |
import os
from datetime import datetime as dt

from github import Github


LABELS_TO_EXEMPT = [
    "good first issue",
    "feature request",
    "wip",
]


def main():
    g = Github(os.environ["GITHUB_TOKEN"])
    repo = g.get_repo("huggingface/accelerate")
    open_issues = repo.get_issues(state="open")

    for issue in open_issues:
        comments = sorted([comment for comment in issue.get_comments()], key=lambda i: i.created_at, reverse=True)
        last_comment = comments[0] if len(comments) > 0 else None
        current_time = dt.utcnow()
        days_since_updated = (current_time - issue.updated_at).days
        days_since_creation = (current_time - issue.created_at).days
        if (
            last_comment is not None
            and last_comment.user.login == "github-actions[bot]"
            and days_since_updated > 7
            and days_since_creation >= 30
            and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
        ):
            # Close issue since it has been 7 days of inactivity since bot mention.
            issue.edit(state="closed")
        elif (
            days_since_updated > 23
            and days_since_creation >= 30
            and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
        ):
            # Add stale comment
            issue.create_comment(
                "This issue has been automatically marked as stale because it has not had "
                "recent activity. If you think this still needs to be addressed "
                "please comment on this thread.\n\nPlease note that issues that do not follow the "
                "[contributing guidelines](https://github.com/huggingface/accelerate/blob/main/CONTRIBUTING.md) "
                "are likely to be ignored."
            )


if __name__ == "__main__":
    main()
| 26 |
import os

import pytest
import yaml

from datasets.features.features import Features, Value
from datasets.info import DatasetInfo, DatasetInfosDict


@pytest.mark.parametrize(
    "files",
    [
        ["full:README.md", "dataset_infos.json"],
        ["empty:README.md", "dataset_infos.json"],
        ["dataset_infos.json"],
        ["full:README.md"],
    ],
)
def test_from_dir(files, tmp_path_factory):
    dataset_infos_dir = tmp_path_factory.mktemp("dset_infos_dir")
    if "full:README.md" in files:
        with open(dataset_infos_dir / "README.md", "w") as f:
            f.write("---\ndataset_info:\n dataset_size: 42\n---")
    if "empty:README.md" in files:
        with open(dataset_infos_dir / "README.md", "w") as f:
            f.write("")
    # we want to support dataset_infos.json for backward compatibility
    if "dataset_infos.json" in files:
        with open(dataset_infos_dir / "dataset_infos.json", "w") as f:
            f.write('{"default": {"dataset_size": 42}}')
    dataset_infos = DatasetInfosDict.from_directory(dataset_infos_dir)
    assert dataset_infos
    assert dataset_infos["default"].dataset_size == 42


@pytest.mark.parametrize(
    "dataset_info",
    [
        DatasetInfo(),
        DatasetInfo(
            description="foo",
            features=Features({"a": Value("int32")}),
            builder_name="builder",
            config_name="config",
            version="1.0.0",
            splits=[{"name": "train"}],
            download_size=42,
        ),
    ],
)
def test_dataset_info_dump_and_reload(tmp_path, dataset_info):
    dataset_info_dir = str(tmp_path)
    dataset_info.write_to_directory(dataset_info_dir)
    reloaded = DatasetInfo.from_directory(dataset_info_dir)
    assert dataset_info == reloaded
    assert os.path.exists(os.path.join(dataset_info_dir, "dataset_info.json"))


def test_dataset_info_to_yaml_dict():
    dataset_info = DatasetInfo(
        description="foo",
        citation="bar",
        homepage="https://foo.bar",
        license="CC0",
        features=Features({"a": Value("int32")}),
        post_processed={},
        supervised_keys=(),
        task_templates=[],
        builder_name="builder",
        config_name="config",
        version="1.0.0",
        splits=[{"name": "train", "num_examples": 42}],
        download_checksums={},
        download_size=1337,
        post_processing_size=442,
        dataset_size=1234,
        size_in_bytes=1337 + 442 + 1234,
    )
    dataset_info_yaml_dict = dataset_info._to_yaml_dict()
    assert sorted(dataset_info_yaml_dict) == sorted(DatasetInfo._INCLUDED_INFO_IN_YAML)
    for key in DatasetInfo._INCLUDED_INFO_IN_YAML:
        assert key in dataset_info_yaml_dict
        assert isinstance(dataset_info_yaml_dict[key], (list, dict, int, str))
    dataset_info_yaml = yaml.safe_dump(dataset_info_yaml_dict)
    reloaded = yaml.safe_load(dataset_info_yaml)
    assert dataset_info_yaml_dict == reloaded


def test_dataset_info_to_yaml_dict_empty():
    dataset_info = DatasetInfo()
    dataset_info_yaml_dict = dataset_info._to_yaml_dict()
    assert dataset_info_yaml_dict == {}


@pytest.mark.parametrize(
    "dataset_infos_dict",
    [
        DatasetInfosDict(),
        DatasetInfosDict({"default": DatasetInfo()}),
        DatasetInfosDict({"my_config_name": DatasetInfo()}),
        DatasetInfosDict(
            {
                "default": DatasetInfo(
                    description="foo",
                    features=Features({"a": Value("int32")}),
                    builder_name="builder",
                    config_name="config",
                    version="1.0.0",
                    splits=[{"name": "train"}],
                    download_size=42,
                )
            }
        ),
        DatasetInfosDict(
            {
                "v1": DatasetInfo(dataset_size=42),
                "v2": DatasetInfo(dataset_size=1337),
            }
        ),
    ],
)
def test_dataset_infos_dict_dump_and_reload(tmp_path, dataset_infos_dict):
    dataset_infos_dir = str(tmp_path)
    dataset_infos_dict.write_to_directory(dataset_infos_dir)
    reloaded = DatasetInfosDict.from_directory(dataset_infos_dir)

    # the config_name of the dataset_infos_dict takes over the attribute
    for config_name, dataset_info in dataset_infos_dict.items():
        dataset_info.config_name = config_name
        # the yaml representation doesn't include fields like description or citation
        # so we just test that we can recover what we can from the yaml
        dataset_infos_dict[config_name] = DatasetInfo._from_yaml_dict(dataset_info._to_yaml_dict())
    assert dataset_infos_dict == reloaded

    if dataset_infos_dict:
        assert os.path.exists(os.path.join(dataset_infos_dir, "README.md"))
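# Standalone round-trip of the behaviour exercised above (temp-dir path is a placeholder):
#   info = DatasetInfo(description="foo", dataset_size=42)
#   info.write_to_directory("/tmp/dset_info")                 # writes dataset_info.json
#   assert DatasetInfo.from_directory("/tmp/dset_info") == info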
| 22 | 0 |
import os
import tempfile
import unittest
from pathlib import Path

from transformers import AutoConfig, is_tf_available
from transformers.testing_utils import require_tf


if is_tf_available():
    import tensorflow as tf

    from transformers import TensorFlowBenchmark, TensorFlowBenchmarkArguments


@require_tf
class TFBenchmarkTest(unittest.TestCase):
    def check_results_dict_not_empty(self, results):
        for model_result in results.values():
            for batch_size, sequence_length in zip(model_result["bs"], model_result["ss"]):
                result = model_result["result"][batch_size][sequence_length]
                self.assertIsNotNone(result)

    def test_inference_no_configs_eager(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], eager_mode=True, multi_process=False,
        )
        benchmark = TensorFlowBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_inference_no_configs_only_pretrain(self):
        MODEL_ID = "sgugger/tiny-distilbert-classification"
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], multi_process=False, only_pretrain_model=True,
        )
        benchmark = TensorFlowBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_inference_no_configs_graph(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], multi_process=False,
        )
        benchmark = TensorFlowBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_inference_with_configs_eager(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], eager_mode=True, multi_process=False,
        )
        benchmark = TensorFlowBenchmark(benchmark_args, [config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_inference_with_configs_graph(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], multi_process=False,
        )
        benchmark = TensorFlowBenchmark(benchmark_args, [config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_train_no_configs(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID], training=True, inference=False, sequence_lengths=[8], batch_sizes=[1], multi_process=False,
        )
        benchmark = TensorFlowBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result)
        self.check_results_dict_not_empty(results.memory_train_result)

    def test_train_with_configs(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID], training=True, inference=False, sequence_lengths=[8], batch_sizes=[1], multi_process=False,
        )
        benchmark = TensorFlowBenchmark(benchmark_args, [config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result)
        self.check_results_dict_not_empty(results.memory_train_result)

    def test_inference_encoder_decoder_with_configs(self):
        MODEL_ID = "patrickvonplaten/t5-tiny-random"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], multi_process=False,
        )
        benchmark = TensorFlowBenchmark(benchmark_args, configs=[config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    @unittest.skipIf(is_tf_available() and len(tf.config.list_physical_devices("GPU")) == 0, "Cannot do xla on CPU.")
    def test_inference_no_configs_xla(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], use_xla=True, multi_process=False,
        )
        benchmark = TensorFlowBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_save_csv_files(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        with tempfile.TemporaryDirectory() as tmp_dir:
            benchmark_args = TensorFlowBenchmarkArguments(
                models=[MODEL_ID], inference=True, save_to_csv=True, sequence_lengths=[8], batch_sizes=[1], inference_time_csv_file=os.path.join(tmp_dir, "inf_time.csv"), inference_memory_csv_file=os.path.join(tmp_dir, "inf_mem.csv"), env_info_csv_file=os.path.join(tmp_dir, "env.csv"), multi_process=False,
            )
            benchmark = TensorFlowBenchmark(benchmark_args)
            benchmark.run()
            self.assertTrue(Path(os.path.join(tmp_dir, "inf_time.csv")).exists())
            self.assertTrue(Path(os.path.join(tmp_dir, "inf_mem.csv")).exists())
            self.assertTrue(Path(os.path.join(tmp_dir, "env.csv")).exists())

    def test_trace_memory(self):
        MODEL_ID = "sshleifer/tiny-gpt2"

        def _check_summary_is_not_empty(summary):
            self.assertTrue(hasattr(summary, "sequential"))
            self.assertTrue(hasattr(summary, "cumulative"))
            self.assertTrue(hasattr(summary, "current"))
            self.assertTrue(hasattr(summary, "total"))

        with tempfile.TemporaryDirectory() as tmp_dir:
            benchmark_args = TensorFlowBenchmarkArguments(
                models=[MODEL_ID], inference=True, sequence_lengths=[8], batch_sizes=[1], log_filename=os.path.join(tmp_dir, "log.txt"), log_print=True, trace_memory_line_by_line=True, eager_mode=True, multi_process=False,
            )
            benchmark = TensorFlowBenchmark(benchmark_args)
            result = benchmark.run()
            _check_summary_is_not_empty(result.inference_summary)
            self.assertTrue(Path(os.path.join(tmp_dir, "log.txt")).exists())
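# Standalone sketch of the same API outside the test harness (model and settings
# taken from the tests above):
#   args = TensorFlowBenchmarkArguments(models=["sshleifer/tiny-gpt2"], inference=True, training=False,
#                                       sequence_lengths=[8], batch_sizes=[1], multi_process=False)
#   print(TensorFlowBenchmark(args).run().time_inference_result)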
| 27 |
def reverse_long_words(sentence: str) -> str:
    """Reverse every word that is longer than 4 characters in a sentence."""
    return " ".join(
        "".join(word[::-1]) if len(word) > 4 else word for word in sentence.split()
    )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(reverse_long_words("Hey wollef sroirraw"))
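# Example: words longer than four characters are reversed, shorter ones are kept:
#   reverse_long_words("Hey wollef sroirraw")  # -> 'Hey fellow warriors'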
| 22 | 0 |
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.auto import AutoModelForSeqaSeqLM, AutoTokenizer
from .base import PipelineTool
_lowerCamelCase : Optional[int] = {
"Acehnese Arabic": "ace_Arab",
"Acehnese Latin": "ace_Latn",
"Mesopotamian Arabic": "acm_Arab",
"Ta'izzi-Adeni Arabic": "acq_Arab",
"Tunisian Arabic": "aeb_Arab",
"Afrikaans": "afr_Latn",
"South Levantine Arabic": "ajp_Arab",
"Akan": "aka_Latn",
"Amharic": "amh_Ethi",
"North Levantine Arabic": "apc_Arab",
"Modern Standard Arabic": "arb_Arab",
"Modern Standard Arabic Romanized": "arb_Latn",
"Najdi Arabic": "ars_Arab",
"Moroccan Arabic": "ary_Arab",
"Egyptian Arabic": "arz_Arab",
"Assamese": "asm_Beng",
"Asturian": "ast_Latn",
"Awadhi": "awa_Deva",
"Central Aymara": "ayr_Latn",
"South Azerbaijani": "azb_Arab",
"North Azerbaijani": "azj_Latn",
"Bashkir": "bak_Cyrl",
"Bambara": "bam_Latn",
"Balinese": "ban_Latn",
"Belarusian": "bel_Cyrl",
"Bemba": "bem_Latn",
"Bengali": "ben_Beng",
"Bhojpuri": "bho_Deva",
"Banjar Arabic": "bjn_Arab",
"Banjar Latin": "bjn_Latn",
"Standard Tibetan": "bod_Tibt",
"Bosnian": "bos_Latn",
"Buginese": "bug_Latn",
"Bulgarian": "bul_Cyrl",
"Catalan": "cat_Latn",
"Cebuano": "ceb_Latn",
"Czech": "ces_Latn",
"Chokwe": "cjk_Latn",
"Central Kurdish": "ckb_Arab",
"Crimean Tatar": "crh_Latn",
"Welsh": "cym_Latn",
"Danish": "dan_Latn",
"German": "deu_Latn",
"Southwestern Dinka": "dik_Latn",
"Dyula": "dyu_Latn",
"Dzongkha": "dzo_Tibt",
"Greek": "ell_Grek",
"English": "eng_Latn",
"Esperanto": "epo_Latn",
"Estonian": "est_Latn",
"Basque": "eus_Latn",
"Ewe": "ewe_Latn",
"Faroese": "fao_Latn",
"Fijian": "fij_Latn",
"Finnish": "fin_Latn",
"Fon": "fon_Latn",
"French": "fra_Latn",
"Friulian": "fur_Latn",
"Nigerian Fulfulde": "fuv_Latn",
"Scottish Gaelic": "gla_Latn",
"Irish": "gle_Latn",
"Galician": "glg_Latn",
"Guarani": "grn_Latn",
"Gujarati": "guj_Gujr",
"Haitian Creole": "hat_Latn",
"Hausa": "hau_Latn",
"Hebrew": "heb_Hebr",
"Hindi": "hin_Deva",
"Chhattisgarhi": "hne_Deva",
"Croatian": "hrv_Latn",
"Hungarian": "hun_Latn",
"Armenian": "hye_Armn",
"Igbo": "ibo_Latn",
"Ilocano": "ilo_Latn",
"Indonesian": "ind_Latn",
"Icelandic": "isl_Latn",
"Italian": "ita_Latn",
"Javanese": "jav_Latn",
"Japanese": "jpn_Jpan",
"Kabyle": "kab_Latn",
"Jingpho": "kac_Latn",
"Kamba": "kam_Latn",
"Kannada": "kan_Knda",
"Kashmiri Arabic": "kas_Arab",
"Kashmiri Devanagari": "kas_Deva",
"Georgian": "kat_Geor",
"Central Kanuri Arabic": "knc_Arab",
"Central Kanuri Latin": "knc_Latn",
"Kazakh": "kaz_Cyrl",
"Kabiyè": "kbp_Latn",
"Kabuverdianu": "kea_Latn",
"Khmer": "khm_Khmr",
"Kikuyu": "kik_Latn",
"Kinyarwanda": "kin_Latn",
"Kyrgyz": "kir_Cyrl",
"Kimbundu": "kmb_Latn",
"Northern Kurdish": "kmr_Latn",
"Kikongo": "kon_Latn",
"Korean": "kor_Hang",
"Lao": "lao_Laoo",
"Ligurian": "lij_Latn",
"Limburgish": "lim_Latn",
"Lingala": "lin_Latn",
"Lithuanian": "lit_Latn",
"Lombard": "lmo_Latn",
"Latgalian": "ltg_Latn",
"Luxembourgish": "ltz_Latn",
"Luba-Kasai": "lua_Latn",
"Ganda": "lug_Latn",
"Luo": "luo_Latn",
"Mizo": "lus_Latn",
"Standard Latvian": "lvs_Latn",
"Magahi": "mag_Deva",
"Maithili": "mai_Deva",
"Malayalam": "mal_Mlym",
"Marathi": "mar_Deva",
"Minangkabau Arabic ": "min_Arab",
"Minangkabau Latin": "min_Latn",
"Macedonian": "mkd_Cyrl",
"Plateau Malagasy": "plt_Latn",
"Maltese": "mlt_Latn",
"Meitei Bengali": "mni_Beng",
"Halh Mongolian": "khk_Cyrl",
"Mossi": "mos_Latn",
"Maori": "mri_Latn",
"Burmese": "mya_Mymr",
"Dutch": "nld_Latn",
"Norwegian Nynorsk": "nno_Latn",
"Norwegian Bokmål": "nob_Latn",
"Nepali": "npi_Deva",
"Northern Sotho": "nso_Latn",
"Nuer": "nus_Latn",
"Nyanja": "nya_Latn",
"Occitan": "oci_Latn",
"West Central Oromo": "gaz_Latn",
"Odia": "ory_Orya",
"Pangasinan": "pag_Latn",
"Eastern Panjabi": "pan_Guru",
"Papiamento": "pap_Latn",
"Western Persian": "pes_Arab",
"Polish": "pol_Latn",
"Portuguese": "por_Latn",
"Dari": "prs_Arab",
"Southern Pashto": "pbt_Arab",
"Ayacucho Quechua": "quy_Latn",
"Romanian": "ron_Latn",
"Rundi": "run_Latn",
"Russian": "rus_Cyrl",
"Sango": "sag_Latn",
"Sanskrit": "san_Deva",
"Santali": "sat_Olck",
"Sicilian": "scn_Latn",
"Shan": "shn_Mymr",
"Sinhala": "sin_Sinh",
"Slovak": "slk_Latn",
"Slovenian": "slv_Latn",
"Samoan": "smo_Latn",
"Shona": "sna_Latn",
"Sindhi": "snd_Arab",
"Somali": "som_Latn",
"Southern Sotho": "sot_Latn",
"Spanish": "spa_Latn",
"Tosk Albanian": "als_Latn",
"Sardinian": "srd_Latn",
"Serbian": "srp_Cyrl",
"Swati": "ssw_Latn",
"Sundanese": "sun_Latn",
"Swedish": "swe_Latn",
"Swahili": "swh_Latn",
"Silesian": "szl_Latn",
"Tamil": "tam_Taml",
"Tatar": "tat_Cyrl",
"Telugu": "tel_Telu",
"Tajik": "tgk_Cyrl",
"Tagalog": "tgl_Latn",
"Thai": "tha_Thai",
"Tigrinya": "tir_Ethi",
"Tamasheq Latin": "taq_Latn",
"Tamasheq Tifinagh": "taq_Tfng",
"Tok Pisin": "tpi_Latn",
"Tswana": "tsn_Latn",
"Tsonga": "tso_Latn",
"Turkmen": "tuk_Latn",
"Tumbuka": "tum_Latn",
"Turkish": "tur_Latn",
"Twi": "twi_Latn",
"Central Atlas Tamazight": "tzm_Tfng",
"Uyghur": "uig_Arab",
"Ukrainian": "ukr_Cyrl",
"Umbundu": "umb_Latn",
"Urdu": "urd_Arab",
"Northern Uzbek": "uzn_Latn",
"Venetian": "vec_Latn",
"Vietnamese": "vie_Latn",
"Waray": "war_Latn",
"Wolof": "wol_Latn",
"Xhosa": "xho_Latn",
"Eastern Yiddish": "ydd_Hebr",
"Yoruba": "yor_Latn",
"Yue Chinese": "yue_Hant",
"Chinese Simplified": "zho_Hans",
"Chinese Traditional": "zho_Hant",
"Standard Malay": "zsm_Latn",
"Zulu": "zul_Latn",
}
class SCREAMING_SNAKE_CASE ( PipelineTool ):
"""simple docstring"""
    default_checkpoint = """facebook/nllb-200-distilled-600M"""
    description = (
        """This is a tool that translates text from a language to another. It takes three inputs: `text`, which should """
        """be the text to translate, `src_lang`, which should be the language of the text to translate and `tgt_lang`, """
        """which should be the language for the desired output language. Both `src_lang` and `tgt_lang` are written in """
        """plain English, such as 'Romanian', or 'Albanian'. It returns the text translated in `tgt_lang`."""
    )
    name = """translator"""
    pre_processor_class = AutoTokenizer
    model_class = AutoModelForSeq2SeqLM
    lang_to_code = LANGUAGE_CODES
    inputs = ["""text""", """text""", """text"""]
    outputs = ["""text"""]
    def encode ( self : str , text : List[Any] , src_lang : Tuple , tgt_lang : List[str] ):
        """simple docstring"""
        if src_lang not in self.lang_to_code:
            raise ValueError(f"""{src_lang} is not a supported language.""" )
        if tgt_lang not in self.lang_to_code:
            raise ValueError(f"""{tgt_lang} is not a supported language.""" )
        src_lang = self.lang_to_code[src_lang]
        tgt_lang = self.lang_to_code[tgt_lang]
        return self.pre_processor._build_translation_inputs(
            text , return_tensors='pt' , src_lang=src_lang , tgt_lang=tgt_lang )
    def forward ( self : List[Any] , inputs : Any ):
        """simple docstring"""
        return self.model.generate(**inputs )
    def decode ( self : Dict , outputs : int ):
        """simple docstring"""
        return self.post_processor.decode(outputs[0].tolist() , skip_special_tokens=True )
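# Usage sketch (editorial, not part of the original module): PipelineTool
# subclasses are callable, and __call__ chains encode -> forward -> decode.
# translator = SCREAMING_SNAKE_CASE()
# translator("How are you?", src_lang="English", tgt_lang="French")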
| 28 |
'''simple docstring'''
def harmonic_series ( n_term : str ) -> list:
    '''simple docstring'''
    if n_term == "":
        return []
    series = []
    for temp in range(int(n_term ) ):
        series.append(f'1/{temp + 1}' if series else "1" )
    return series
if __name__ == "__main__":
    nth_term = input('''Enter the last number (nth term) of the Harmonic Series''')
print('''Formula of Harmonic Series => 1+1/2+1/3 ..... 1/n''')
print(harmonic_series(nth_term))
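    # Example (editorial sketch): harmonic_series("5") returns
    # ['1', '1/2', '1/3', '1/4', '1/5'].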
| 22 | 0 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__UpperCAmelCase = logging.get_logger(__name__)
__UpperCAmelCase = {
'distilbert-base-uncased': 'https://huggingface.co/distilbert-base-uncased/resolve/main/config.json',
'distilbert-base-uncased-distilled-squad': (
'https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/config.json'
),
'distilbert-base-cased': 'https://huggingface.co/distilbert-base-cased/resolve/main/config.json',
'distilbert-base-cased-distilled-squad': (
'https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/config.json'
),
'distilbert-base-german-cased': 'https://huggingface.co/distilbert-base-german-cased/resolve/main/config.json',
'distilbert-base-multilingual-cased': (
'https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/config.json'
),
'distilbert-base-uncased-finetuned-sst-2-english': (
'https://huggingface.co/distilbert-base-uncased-finetuned-sst-2-english/resolve/main/config.json'
),
}
class DistilBertConfig (PretrainedConfig ):
'''simple docstring'''
_snake_case : Optional[Any] = '''distilbert'''
_snake_case : Dict = {
'''hidden_size''': '''dim''',
'''num_attention_heads''': '''n_heads''',
'''num_hidden_layers''': '''n_layers''',
}
    def __init__( self , vocab_size=3_0_5_2_2 , max_position_embeddings=5_1_2 , sinusoidal_pos_embds=False , n_layers=6 , n_heads=1_2 , dim=7_6_8 , hidden_dim=4 * 7_6_8 , dropout=0.1 , attention_dropout=0.1 , activation="gelu" , initializer_range=0.02 , qa_dropout=0.1 , seq_classif_dropout=0.2 , pad_token_id=0 , **kwargs , ) -> Any:
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.sinusoidal_pos_embds = sinusoidal_pos_embds
        self.n_layers = n_layers
        self.n_heads = n_heads
        self.dim = dim
        self.hidden_dim = hidden_dim
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation = activation
        self.initializer_range = initializer_range
        self.qa_dropout = qa_dropout
        self.seq_classif_dropout = seq_classif_dropout
        super().__init__(**kwargs , pad_token_id=pad_token_id )
class DistilBertOnnxConfig (OnnxConfig ):
'''simple docstring'''
@property
    def inputs ( self ) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: 'batch', 1: 'choice', 2: 'sequence'}
        else:
            dynamic_axis = {0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
] )
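# Usage sketch (editorial, not part of the original module): the ONNX config's
# `inputs` property drives which axes are exported as dynamic.
# config = DistilBertConfig()
# onnx_config = DistilBertOnnxConfig(config)
# list(onnx_config.inputs)  # ['input_ids', 'attention_mask']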
| 29 |
'''simple docstring'''
import unittest
from transformers import PegasusTokenizer, PegasusTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir('''fixtures/test_sentencepiece_no_bos.model''')
@require_sentencepiece
@require_tokenizers
class A_ ( TokenizerTesterMixin , unittest.TestCase ):
    tokenizer_class = PegasusTokenizer
    rust_tokenizer_class = PegasusTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    def setUp ( self : Optional[int] ):
        super().setUp()
        # We have a SentencePiece fixture for testing
        tokenizer = PegasusTokenizer(SAMPLE_VOCAB )
        tokenizer.save_pretrained(self.tmpdirname )
    @cached_property
    def _large_tokenizer ( self : Tuple ):
        return PegasusTokenizer.from_pretrained("google/pegasus-large" )
    def get_tokenizer ( self : Union[str, Any] , **kwargs : Union[str, Any] ):
        return PegasusTokenizer.from_pretrained(self.tmpdirname , **kwargs )
    def get_input_output_texts ( self : Tuple , tokenizer : Any ):
        return ("This is a test", "This is a test")
def lowercase ( self : Optional[int] ):
_UpperCAmelCase = "</s>"
_UpperCAmelCase = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(snake_case_ ) , snake_case_ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(snake_case_ ) , snake_case_ )
def lowercase ( self : Union[str, Any] ):
_UpperCAmelCase = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , "<pad>" )
self.assertEqual(vocab_keys[1] , "</s>" )
self.assertEqual(vocab_keys[-1] , "v" )
self.assertEqual(len(snake_case_ ) , 1_1_0_3 )
def lowercase ( self : Any ):
self.assertEqual(self.get_tokenizer().vocab_size , 1_1_0_3 )
def lowercase ( self : List[Any] ):
_UpperCAmelCase = self.rust_tokenizer_class.from_pretrained(self.tmpdirname )
_UpperCAmelCase = self.tokenizer_class.from_pretrained(self.tmpdirname )
_UpperCAmelCase = (
"Let's see which <unk> is the better <unk_token_11> one <mask_1> It seems like this <mask_2> was important"
" </s> <pad> <pad> <pad>"
)
_UpperCAmelCase = rust_tokenizer([raw_input_str] , return_tensors=snake_case_ , add_special_tokens=snake_case_ ).input_ids[0]
_UpperCAmelCase = py_tokenizer([raw_input_str] , return_tensors=snake_case_ , add_special_tokens=snake_case_ ).input_ids[0]
self.assertListEqual(snake_case_ , snake_case_ )
def lowercase ( self : Tuple ):
_UpperCAmelCase = self._large_tokenizer
# <mask_1> masks whole sentence while <mask_2> masks single word
_UpperCAmelCase = "<mask_1> To ensure a <mask_2> flow of bank resolutions."
_UpperCAmelCase = [2, 4_1_3, 6_1_5, 1_1_4, 3, 1_9_7_1, 1_1_3, 1_6_7_9, 1_0_7_1_0, 1_0_7, 1]
_UpperCAmelCase = tokenizer([raw_input_str] , return_tensors=snake_case_ ).input_ids[0]
self.assertListEqual(snake_case_ , snake_case_ )
def lowercase ( self : Union[str, Any] ):
_UpperCAmelCase = self._large_tokenizer
# The tracebacks for the following asserts are **better** without messages or self.assertEqual
assert tokenizer.vocab_size == 9_6_1_0_3
assert tokenizer.pad_token_id == 0
assert tokenizer.eos_token_id == 1
assert tokenizer.offset == 1_0_3
assert tokenizer.unk_token_id == tokenizer.offset + 2 == 1_0_5
assert tokenizer.unk_token == "<unk>"
assert tokenizer.model_max_length == 1_0_2_4
_UpperCAmelCase = "To ensure a smooth flow of bank resolutions."
_UpperCAmelCase = [4_1_3, 6_1_5, 1_1_4, 2_2_9_1, 1_9_7_1, 1_1_3, 1_6_7_9, 1_0_7_1_0, 1_0_7, 1]
_UpperCAmelCase = tokenizer([raw_input_str] , return_tensors=snake_case_ ).input_ids[0]
self.assertListEqual(snake_case_ , snake_case_ )
assert tokenizer.convert_ids_to_tokens([0, 1, 2, 3] ) == ["<pad>", "</s>", "<mask_1>", "<mask_2>"]
@require_torch
def lowercase ( self : int ):
_UpperCAmelCase = ["This is going to be way too long." * 1_5_0, "short example"]
_UpperCAmelCase = ["not super long but more than 5 tokens", "tiny"]
_UpperCAmelCase = self._large_tokenizer(snake_case_ , padding=snake_case_ , truncation=snake_case_ , return_tensors="pt" )
_UpperCAmelCase = self._large_tokenizer(
text_target=snake_case_ , max_length=5 , padding=snake_case_ , truncation=snake_case_ , return_tensors="pt" )
assert batch.input_ids.shape == (2, 1_0_2_4)
assert batch.attention_mask.shape == (2, 1_0_2_4)
assert targets["input_ids"].shape == (2, 5)
assert len(snake_case_ ) == 2 # input_ids, attention_mask.
@slow
def lowercase ( self : Dict ):
# fmt: off
_UpperCAmelCase = {"input_ids": [[3_8_9_7_9, 1_4_3, 1_8_4_8_5, 6_0_6, 1_3_0, 2_6_6_6_9, 8_7_6_8_6, 1_2_1, 5_4_1_8_9, 1_1_2_9, 1_1_1, 2_6_6_6_9, 8_7_6_8_6, 1_2_1, 9_1_1_4, 1_4_7_8_7, 1_2_1, 1_3_2_4_9, 1_5_8, 5_9_2, 9_5_6, 1_2_1, 1_4_6_2_1, 3_1_5_7_6, 1_4_3, 6_2_6_1_3, 1_0_8, 9_6_8_8, 9_3_0, 4_3_4_3_0, 1_1_5_6_2, 6_2_6_1_3, 3_0_4, 1_0_8, 1_1_4_4_3, 8_9_7, 1_0_8, 9_3_1_4, 1_7_4_1_5, 6_3_3_9_9, 1_0_8, 1_1_4_4_3, 7_6_1_4, 1_8_3_1_6, 1_1_8, 4_2_8_4, 7_1_4_8, 1_2_4_3_0, 1_4_3, 1_4_0_0, 2_5_7_0_3, 1_5_8, 1_1_1, 4_2_8_4, 7_1_4_8, 1_1_7_7_2, 1_4_3, 2_1_2_9_7, 1_0_6_4, 1_5_8, 1_2_2, 2_0_4, 3_5_0_6, 1_7_5_4, 1_1_3_3, 1_4_7_8_7, 1_5_8_1, 1_1_5, 3_3_2_2_4, 4_4_8_2, 1_1_1, 1_3_5_5, 1_1_0, 2_9_1_7_3, 3_1_7, 5_0_8_3_3, 1_0_8, 2_0_1_4_7, 9_4_6_6_5, 1_1_1, 7_7_1_9_8, 1_0_7, 1], [1_1_0, 6_2_6_1_3, 1_1_7, 6_3_8, 1_1_2, 1_1_3_3, 1_2_1, 2_0_0_9_8, 1_3_5_5, 7_9_0_5_0, 1_3_8_7_2, 1_3_5, 1_5_9_6, 5_3_5_4_1, 1_3_5_2, 1_4_1, 1_3_0_3_9, 5_5_4_2, 1_2_4, 3_0_2, 5_1_8, 1_1_1, 2_6_8, 2_9_5_6, 1_1_5, 1_4_9, 4_4_2_7, 1_0_7, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1_3_9, 1_2_3_5, 2_7_9_9, 1_8_2_8_9, 1_7_7_8_0, 2_0_4, 1_0_9, 9_4_7_4, 1_2_9_6, 1_0_7, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=snake_case_ , model_name="google/bigbird-pegasus-large-arxiv" , revision="ba85d0851d708441f91440d509690f1ab6353415" , )
@require_sentencepiece
@require_tokenizers
class A_ ( TokenizerTesterMixin , unittest.TestCase ):
    tokenizer_class = PegasusTokenizer
    rust_tokenizer_class = PegasusTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    def setUp ( self : Any ):
        super().setUp()
        # We have a SentencePiece fixture for testing
        tokenizer = PegasusTokenizer(SAMPLE_VOCAB , offset=0 , mask_token_sent=None , mask_token="[MASK]" )
        tokenizer.save_pretrained(self.tmpdirname )
    @cached_property
    def _large_tokenizer ( self : Tuple ):
        return PegasusTokenizer.from_pretrained("google/bigbird-pegasus-large-arxiv" )
    def get_tokenizer ( self : Optional[Any] , **kwargs : Dict ):
        return PegasusTokenizer.from_pretrained(self.tmpdirname , **kwargs )
    def get_input_output_texts ( self : Union[str, Any] , tokenizer : str ):
        return ("This is a test", "This is a test")
def lowercase ( self : List[str] ):
_UpperCAmelCase = self.rust_tokenizer_class.from_pretrained(self.tmpdirname )
_UpperCAmelCase = self.tokenizer_class.from_pretrained(self.tmpdirname )
_UpperCAmelCase = (
"Let's see which <unk> is the better <unk_token> one [MASK] It seems like this [MASK] was important </s>"
" <pad> <pad> <pad>"
)
_UpperCAmelCase = rust_tokenizer([raw_input_str] , return_tensors=snake_case_ , add_special_tokens=snake_case_ ).input_ids[0]
_UpperCAmelCase = py_tokenizer([raw_input_str] , return_tensors=snake_case_ , add_special_tokens=snake_case_ ).input_ids[0]
self.assertListEqual(snake_case_ , snake_case_ )
@require_torch
def lowercase ( self : Tuple ):
_UpperCAmelCase = ["This is going to be way too long." * 1_0_0_0, "short example"]
_UpperCAmelCase = ["not super long but more than 5 tokens", "tiny"]
_UpperCAmelCase = self._large_tokenizer(snake_case_ , padding=snake_case_ , truncation=snake_case_ , return_tensors="pt" )
_UpperCAmelCase = self._large_tokenizer(
text_target=snake_case_ , max_length=5 , padding=snake_case_ , truncation=snake_case_ , return_tensors="pt" )
assert batch.input_ids.shape == (2, 4_0_9_6)
assert batch.attention_mask.shape == (2, 4_0_9_6)
assert targets["input_ids"].shape == (2, 5)
assert len(snake_case_ ) == 2 # input_ids, attention_mask.
def lowercase ( self : Union[str, Any] ):
_UpperCAmelCase = (
"This is an example string that is used to test the original TF implementation against the HF"
" implementation"
)
_UpperCAmelCase = self._large_tokenizer(snake_case_ ).input_ids
self.assertListEqual(
snake_case_ , [1_8_2, 1_1_7, 1_4_2, 5_8_7, 4_2_1_1, 1_2_0, 1_1_7, 2_6_3, 1_1_2, 8_0_4, 1_0_9, 8_5_6, 2_5_0_1_6, 3_1_3_7, 4_6_4, 1_0_9, 2_6_9_5_5, 3_1_3_7, 1] , )
| 22 | 0 |
import multiprocessing
import time
from arguments import PretokenizationArguments
from datasets import load_dataset
from transformers import AutoTokenizer, HfArgumentParser
def tokenize ( example: List[str] ):
    '''simple docstring'''
    output = {}
    output['''input_ids'''] = tokenizer(example['''content'''] , truncation=False )['''input_ids''']
    output['''ratio_char_token'''] = len(example['''content'''] ) / len(output['''input_ids'''] )
    return output
parser = HfArgumentParser(PretokenizationArguments)
args = parser.parse_args()
if args.num_workers is None:
    args.num_workers = multiprocessing.cpu_count()
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_dir)
t_start = time.time()
ds = load_dataset(args.dataset_name, split='train')
print(f"Dataset loaded in {time.time()-t_start:.2f}s")
t_start = time.time()
ds = ds.map(
tokenize,
num_proc=args.num_workers,
remove_columns=[
'repo_name',
'path',
'copies',
'size',
'content',
'license',
'hash',
'line_mean',
'line_max',
'alpha_frac',
'autogenerated',
],
)
print(f"Dataset tokenized in {time.time()-t_start:.2f}s")
t_start = time.time()
ds.push_to_hub(args.tokenized_data_repo)
print(f"Data pushed to the hub in {time.time()-t_start:.2f}s")
| 30 |
'''simple docstring'''
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, BertTokenizer, BlipImageProcessor, BlipProcessor, PreTrainedTokenizerFast
@require_vision
class A_ ( unittest.TestCase ):
    def setUp ( self : int ):
        self.tmpdirname = tempfile.mkdtemp()
        image_processor = BlipImageProcessor()
        tokenizer = BertTokenizer.from_pretrained("hf-internal-testing/tiny-random-BertModel" )
        processor = BlipProcessor(image_processor , tokenizer )
        processor.save_pretrained(self.tmpdirname )
    def get_tokenizer ( self : Tuple , **kwargs : int ):
        return AutoProcessor.from_pretrained(self.tmpdirname , **kwargs ).tokenizer
    def get_image_processor ( self : Dict , **kwargs : Any ):
        return AutoProcessor.from_pretrained(self.tmpdirname , **kwargs ).image_processor
    def tearDown ( self : int ):
        shutil.rmtree(self.tmpdirname )
    def prepare_image_inputs ( self : Optional[Any] ):
        image_inputs = [np.random.randint(2_5_5 , size=(3, 3_0, 4_0_0) , dtype=np.uint8 )]
        image_inputs = [Image.fromarray(np.moveaxis(x , 0 , -1 ) ) for x in image_inputs]
        return image_inputs
def lowercase ( self : int ):
_UpperCAmelCase = BlipProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
_UpperCAmelCase = self.get_tokenizer(bos_token="(BOS)" , eos_token="(EOS)" )
_UpperCAmelCase = self.get_image_processor(do_normalize=snake_case_ , padding_value=1.0 )
_UpperCAmelCase = BlipProcessor.from_pretrained(
self.tmpdirname , bos_token="(BOS)" , eos_token="(EOS)" , do_normalize=snake_case_ , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , snake_case_ )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , snake_case_ )
def lowercase ( self : Any ):
_UpperCAmelCase = self.get_image_processor()
_UpperCAmelCase = self.get_tokenizer()
_UpperCAmelCase = BlipProcessor(tokenizer=snake_case_ , image_processor=snake_case_ )
_UpperCAmelCase = self.prepare_image_inputs()
_UpperCAmelCase = image_processor(snake_case_ , return_tensors="np" )
_UpperCAmelCase = processor(images=snake_case_ , return_tensors="np" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
def lowercase ( self : Optional[int] ):
_UpperCAmelCase = self.get_image_processor()
_UpperCAmelCase = self.get_tokenizer()
_UpperCAmelCase = BlipProcessor(tokenizer=snake_case_ , image_processor=snake_case_ )
_UpperCAmelCase = "lower newer"
_UpperCAmelCase = processor(text=snake_case_ )
_UpperCAmelCase = tokenizer(snake_case_ , return_token_type_ids=snake_case_ )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def lowercase ( self : Optional[Any] ):
_UpperCAmelCase = self.get_image_processor()
_UpperCAmelCase = self.get_tokenizer()
_UpperCAmelCase = BlipProcessor(tokenizer=snake_case_ , image_processor=snake_case_ )
_UpperCAmelCase = "lower newer"
_UpperCAmelCase = self.prepare_image_inputs()
_UpperCAmelCase = processor(text=snake_case_ , images=snake_case_ )
self.assertListEqual(list(inputs.keys() ) , ["pixel_values", "input_ids", "attention_mask"] )
# test if it raises when no input is passed
with pytest.raises(snake_case_ ):
processor()
def lowercase ( self : Union[str, Any] ):
_UpperCAmelCase = self.get_image_processor()
_UpperCAmelCase = self.get_tokenizer()
_UpperCAmelCase = BlipProcessor(tokenizer=snake_case_ , image_processor=snake_case_ )
_UpperCAmelCase = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
_UpperCAmelCase = processor.batch_decode(snake_case_ )
_UpperCAmelCase = tokenizer.batch_decode(snake_case_ )
self.assertListEqual(snake_case_ , snake_case_ )
def lowercase ( self : str ):
_UpperCAmelCase = self.get_image_processor()
_UpperCAmelCase = self.get_tokenizer()
_UpperCAmelCase = BlipProcessor(tokenizer=snake_case_ , image_processor=snake_case_ )
_UpperCAmelCase = "lower newer"
_UpperCAmelCase = self.prepare_image_inputs()
_UpperCAmelCase = processor(text=snake_case_ , images=snake_case_ )
# For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask']
self.assertListEqual(list(inputs.keys() ) , ["pixel_values", "input_ids", "attention_mask"] )
| 22 | 0 |
'''simple docstring'''
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Audio, ClassLabel, Features
from .base import TaskTemplate
@dataclass(frozen=True )
class AudioClassification (TaskTemplate ):
    '''simple docstring'''
    task: str = field(default="audio-classification" , metadata={"include_in_asdict_even_if_is_default": True} )
    input_schema: ClassVar[Features] = Features({"audio": Audio()} )
    label_schema: ClassVar[Features] = Features({"labels": ClassLabel} )
    audio_column: str = "audio"
    label_column: str = "labels"
    def align_with_features ( self : int , features : List[Any] ):
        if self.label_column not in features:
            raise ValueError(F"""Column {self.label_column} is not present in features.""" )
        if not isinstance(features[self.label_column] , ClassLabel ):
            raise ValueError(F"""Column {self.label_column} is not a ClassLabel.""" )
        task_template = copy.deepcopy(self )
        label_schema = self.label_schema.copy()
        label_schema['''labels'''] = features[self.label_column]
        task_template.__dict__['''label_schema'''] = label_schema
        return task_template
    @property
    def column_mapping ( self : int ):
        return {
            self.audio_column: "audio",
            self.label_column: "labels",
        }
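# Usage sketch (editorial, not part of the original module): aligning the template
# against a dataset's features swaps the generic ClassLabel placeholder for the
# dataset's own label feature.
# features = Features({"audio": Audio(), "labels": ClassLabel(names=["dog", "cat"])})
# task = AudioClassification().align_with_features(features)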
| 31 |
'''simple docstring'''
import inspect
from typing import List, Optional, Tuple, Union
import numpy as np
import PIL
import torch
import torch.utils.checkpoint
from ...models import UNetaDModel, VQModel
from ...schedulers import (
DDIMScheduler,
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
)
from ...utils import PIL_INTERPOLATION, randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
def preprocess ( image : PIL.Image.Image ) -> torch.Tensor:
    '''simple docstring'''
    w , h = image.size
    w , h = (x - x % 32 for x in (w, h)) # resize to integer multiple of 32
    image = image.resize((w, h) , resample=PIL_INTERPOLATION["lanczos"] )
    image = np.array(image ).astype(np.float32 ) / 255.0
    image = image[None].transpose(0 , 3 , 1 , 2 )
    image = torch.from_numpy(image )
    return 2.0 * image - 1.0
class LDMSuperResolutionPipeline ( DiffusionPipeline ):
    def __init__( self : Optional[Any] , vqvae : VQModel , unet : UNetaDModel , scheduler : Union[
        DDIMScheduler,
        PNDMScheduler,
        LMSDiscreteScheduler,
        EulerDiscreteScheduler,
        EulerAncestralDiscreteScheduler,
        DPMSolverMultistepScheduler,
    ] , ):
        super().__init__()
        self.register_modules(vqvae=vqvae , unet=unet , scheduler=scheduler )
    @torch.no_grad()
    def __call__( self : Any , image : Union[torch.Tensor, PIL.Image.Image] = None , batch_size : Optional[int] = 1 , num_inference_steps : Optional[int] = 1_0_0 , eta : Optional[float] = 0.0 , generator : Optional[Union[torch.Generator, List[torch.Generator]]] = None , output_type : Optional[str] = "pil" , return_dict : bool = True , ):
        if isinstance(image , PIL.Image.Image ):
            batch_size = 1
        elif isinstance(image , torch.Tensor ):
            batch_size = image.shape[0]
        else:
            raise ValueError(f'`image` has to be of type `PIL.Image.Image` or `torch.Tensor` but is {type(image )}' )
        if isinstance(image , PIL.Image.Image ):
            image = preprocess(image )
        height , width = image.shape[-2:]
        # in_channels should be 6: 3 for latents, 3 for low resolution image
        latents_shape = (batch_size, self.unet.config.in_channels // 2, height, width)
        latents_dtype = next(self.unet.parameters() ).dtype
        latents = randn_tensor(latents_shape , generator=generator , device=self.device , dtype=latents_dtype )
        image = image.to(device=self.device , dtype=latents_dtype )
        # set timesteps and move to the correct device
        self.scheduler.set_timesteps(num_inference_steps , device=self.device )
        timesteps_tensor = self.scheduler.timesteps
        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma
        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature.
        # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
        # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
        # and should be between [0, 1]
        accepts_eta = "eta" in set(inspect.signature(self.scheduler.step ).parameters.keys() )
        extra_kwargs = {}
        if accepts_eta:
            extra_kwargs["eta"] = eta
        for t in self.progress_bar(timesteps_tensor ):
            # concat latents and low resolution image in the channel dimension.
            latents_input = torch.cat([latents, image] , dim=1 )
            latents_input = self.scheduler.scale_model_input(latents_input , t )
            # predict the noise residual
            noise_pred = self.unet(latents_input , t ).sample
            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred , t , latents , **extra_kwargs ).prev_sample
        # decode the image latents with the VQVAE
        image = self.vqvae.decode(latents ).sample
        image = torch.clamp(image , -1.0 , 1.0 )
        image = image / 2 + 0.5
        image = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image )
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=image )
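# Usage sketch (editorial; checkpoint name assumed, not part of the original file):
# pipe = LDMSuperResolutionPipeline.from_pretrained("CompVis/ldm-super-resolution-4x-openimages")
# upscaled = pipe(image=low_res_image, num_inference_steps=100, eta=1.0).images[0]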
| 22 | 0 |
import os
from pathlib import Path
import numpy as np
import pytest
from pack_dataset import pack_data_dir
from parameterized import parameterized
from save_len_file import save_len_file
from torch.utils.data import DataLoader
from transformers import AutoTokenizer
from transformers.models.mbart.modeling_mbart import shift_tokens_right
from transformers.testing_utils import TestCasePlus, slow
from utils import FAIRSEQ_AVAILABLE, DistributedSortishSampler, LegacySeqaSeqDataset, SeqaSeqDataset
BERT_BASE_CASED = 'bert-base-cased'
PEGASUS_XSUM = 'google/pegasus-xsum'
ARTICLES = [' Sam ate lunch today.', 'Sams lunch ingredients.']
SUMMARIES = ['A very interesting story about what I ate for lunch.', 'Avocado, celery, turkey, coffee']
T5_TINY = 'patrickvonplaten/t5-tiny-random'
BART_TINY = 'sshleifer/bart-tiny-random'
MBART_TINY = 'sshleifer/tiny-mbart'
MARIAN_TINY = 'sshleifer/tiny-marian-en-de'
def _dump_articles ( path : Path , articles : list ) -> Tuple:
    """simple docstring"""
    content = '\n'.join(articles )
    Path(path ).open('w' ).writelines(content )
def make_test_data_dir ( tmp_dir : Dict ) -> List[Any]:
    """simple docstring"""
    for split in ["train", "val", "test"]:
        _dump_articles(os.path.join(tmp_dir , F"""{split}.source""" ) , ARTICLES )
        _dump_articles(os.path.join(tmp_dir , F"""{split}.target""" ) , SUMMARIES )
    return tmp_dir
class SCREAMING_SNAKE_CASE__ ( TestCasePlus ):
@parameterized.expand(
[
MBART_TINY,
MARIAN_TINY,
T5_TINY,
BART_TINY,
PEGASUS_XSUM,
] , )
@slow
def SCREAMING_SNAKE_CASE ( self : int , SCREAMING_SNAKE_CASE__ : Union[str, Any] ) -> Optional[Any]:
a_ : Any = AutoTokenizer.from_pretrained(SCREAMING_SNAKE_CASE__ )
a_ : Union[str, Any] = make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() )
a_ : Any = max(len(tokenizer.encode(SCREAMING_SNAKE_CASE__ ) ) for a in ARTICLES )
a_ : Tuple = max(len(tokenizer.encode(SCREAMING_SNAKE_CASE__ ) ) for a in SUMMARIES )
a_ : Optional[Any] = 4
a_ : str = 8
assert max_len_target > max_src_len # Will be truncated
assert max_len_source > max_src_len # Will be truncated
a_ , a_ : str = 'ro_RO', 'de_DE' # ignored for all but mbart, but never causes error.
a_ : str = SeqaSeqDataset(
SCREAMING_SNAKE_CASE__ , data_dir=SCREAMING_SNAKE_CASE__ , type_path='train' , max_source_length=SCREAMING_SNAKE_CASE__ , max_target_length=SCREAMING_SNAKE_CASE__ , src_lang=SCREAMING_SNAKE_CASE__ , tgt_lang=SCREAMING_SNAKE_CASE__ , )
a_ : Tuple = DataLoader(SCREAMING_SNAKE_CASE__ , batch_size=2 , collate_fn=train_dataset.collate_fn )
for batch in dataloader:
assert isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
assert batch["attention_mask"].shape == batch["input_ids"].shape
# show that articles were trimmed.
assert batch["input_ids"].shape[1] == max_src_len
# show that targets are the same len
assert batch["labels"].shape[1] == max_tgt_len
if tok_name != MBART_TINY:
continue
# check language codes in correct place
a_ : Tuple = shift_tokens_right(batch['labels'] , tokenizer.pad_token_id )
assert batch["decoder_input_ids"][0, 0].item() == tokenizer.lang_code_to_id[tgt_lang]
assert batch["decoder_input_ids"][0, -1].item() == tokenizer.eos_token_id
assert batch["input_ids"][0, -2].item() == tokenizer.eos_token_id
assert batch["input_ids"][0, -1].item() == tokenizer.lang_code_to_id[src_lang]
break # No need to test every batch
@parameterized.expand([BART_TINY, BERT_BASE_CASED] )
def SCREAMING_SNAKE_CASE ( self : Optional[int] , SCREAMING_SNAKE_CASE__ : Optional[Any] ) -> List[Any]:
a_ : str = AutoTokenizer.from_pretrained(SCREAMING_SNAKE_CASE__ )
a_ : Any = make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() )
a_ : Any = max(len(tokenizer.encode(SCREAMING_SNAKE_CASE__ ) ) for a in ARTICLES )
a_ : int = max(len(tokenizer.encode(SCREAMING_SNAKE_CASE__ ) ) for a in SUMMARIES )
a_ : List[Any] = 4
a_ : int = LegacySeqaSeqDataset(
SCREAMING_SNAKE_CASE__ , data_dir=SCREAMING_SNAKE_CASE__ , type_path='train' , max_source_length=2_0 , max_target_length=SCREAMING_SNAKE_CASE__ , )
a_ : Any = DataLoader(SCREAMING_SNAKE_CASE__ , batch_size=2 , collate_fn=train_dataset.collate_fn )
for batch in dataloader:
assert batch["attention_mask"].shape == batch["input_ids"].shape
# show that articles were trimmed.
assert batch["input_ids"].shape[1] == max_len_source
assert 2_0 >= batch["input_ids"].shape[1] # trimmed significantly
# show that targets were truncated
assert batch["labels"].shape[1] == trunc_target # Truncated
assert max_len_target > trunc_target # Truncated
break # No need to test every batch
def SCREAMING_SNAKE_CASE ( self : int ) -> Dict:
a_ : int = AutoTokenizer.from_pretrained('facebook/mbart-large-cc25' )
a_ : Tuple = Path(make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) )
a_ : Union[str, Any] = tmp_dir.joinpath('train.source' ).open().readlines()
a_ : int = Path(make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) )
pack_data_dir(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , 1_2_8 , SCREAMING_SNAKE_CASE__ )
a_ : List[str] = {x.name for x in tmp_dir.iterdir()}
a_ : Optional[Any] = {x.name for x in save_dir.iterdir()}
a_ : Optional[Any] = save_dir.joinpath('train.source' ).open().readlines()
# orig: [' Sam ate lunch today.\n', 'Sams lunch ingredients.']
# desired_packed: [' Sam ate lunch today.\n Sams lunch ingredients.']
assert len(SCREAMING_SNAKE_CASE__ ) < len(SCREAMING_SNAKE_CASE__ )
assert len(SCREAMING_SNAKE_CASE__ ) == 1
assert len(packed_examples[0] ) == sum(len(SCREAMING_SNAKE_CASE__ ) for x in orig_examples )
assert orig_paths == new_paths
@pytest.mark.skipif(not FAIRSEQ_AVAILABLE , reason='This test requires fairseq' )
def SCREAMING_SNAKE_CASE ( self : str ) -> List[str]:
if not FAIRSEQ_AVAILABLE:
return
a_ , a_ , a_ : Optional[int] = self._get_dataset(max_len=6_4 )
a_ : Tuple = 6_4
a_ : Any = ds.make_dynamic_sampler(SCREAMING_SNAKE_CASE__ , required_batch_size_multiple=SCREAMING_SNAKE_CASE__ )
a_ : List[Any] = [len(SCREAMING_SNAKE_CASE__ ) for x in batch_sampler]
assert len(set(SCREAMING_SNAKE_CASE__ ) ) > 1 # it's not dynamic batch size if every batch is the same length
assert sum(SCREAMING_SNAKE_CASE__ ) == len(SCREAMING_SNAKE_CASE__ ) # no dropped or added examples
a_ : Dict = DataLoader(SCREAMING_SNAKE_CASE__ , batch_sampler=SCREAMING_SNAKE_CASE__ , collate_fn=ds.collate_fn , num_workers=2 )
a_ : Tuple = []
a_ : Optional[int] = []
for batch in data_loader:
a_ : Any = batch['input_ids'].shape
a_ : Any = src_shape[0]
assert bs % required_batch_size_multiple == 0 or bs < required_batch_size_multiple
a_ : List[Any] = np.product(batch['input_ids'].shape )
num_src_per_batch.append(SCREAMING_SNAKE_CASE__ )
if num_src_tokens > (max_tokens * 1.1):
failures.append(SCREAMING_SNAKE_CASE__ )
assert num_src_per_batch[0] == max(SCREAMING_SNAKE_CASE__ )
if failures:
raise AssertionError(F"""too many tokens in {len(SCREAMING_SNAKE_CASE__ )} batches""" )
def SCREAMING_SNAKE_CASE ( self : Dict ) -> Optional[int]:
a_ , a_ , a_ : Union[str, Any] = self._get_dataset(max_len=5_1_2 )
a_ : Union[str, Any] = 2
a_ : Tuple = ds.make_sortish_sampler(SCREAMING_SNAKE_CASE__ , shuffle=SCREAMING_SNAKE_CASE__ )
a_ : Tuple = DataLoader(SCREAMING_SNAKE_CASE__ , batch_size=SCREAMING_SNAKE_CASE__ , collate_fn=ds.collate_fn , num_workers=2 )
a_ : Optional[Any] = DataLoader(SCREAMING_SNAKE_CASE__ , batch_size=SCREAMING_SNAKE_CASE__ , collate_fn=ds.collate_fn , num_workers=2 , sampler=SCREAMING_SNAKE_CASE__ )
a_ : Optional[Any] = tokenizer.pad_token_id
def count_pad_tokens(SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : Any="input_ids" ):
return [batch[k].eq(SCREAMING_SNAKE_CASE__ ).sum().item() for batch in data_loader]
assert sum(count_pad_tokens(SCREAMING_SNAKE_CASE__ , k='labels' ) ) < sum(count_pad_tokens(SCREAMING_SNAKE_CASE__ , k='labels' ) )
assert sum(count_pad_tokens(SCREAMING_SNAKE_CASE__ ) ) < sum(count_pad_tokens(SCREAMING_SNAKE_CASE__ ) )
assert len(SCREAMING_SNAKE_CASE__ ) == len(SCREAMING_SNAKE_CASE__ )
def SCREAMING_SNAKE_CASE ( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : Any=1_0_0_0 , SCREAMING_SNAKE_CASE__ : Any=1_2_8 ) -> Dict:
if os.getenv('USE_REAL_DATA' , SCREAMING_SNAKE_CASE__ ):
a_ : Optional[Any] = 'examples/seq2seq/wmt_en_ro'
a_ : Dict = max_len * 2 * 6_4
if not Path(SCREAMING_SNAKE_CASE__ ).joinpath('train.len' ).exists():
save_len_file(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
else:
a_ : Any = 'examples/seq2seq/test_data/wmt_en_ro'
a_ : Tuple = max_len * 4
save_len_file(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
a_ : Any = AutoTokenizer.from_pretrained(SCREAMING_SNAKE_CASE__ )
a_ : Optional[Any] = SeqaSeqDataset(
SCREAMING_SNAKE_CASE__ , data_dir=SCREAMING_SNAKE_CASE__ , type_path='train' , max_source_length=SCREAMING_SNAKE_CASE__ , max_target_length=SCREAMING_SNAKE_CASE__ , n_obs=SCREAMING_SNAKE_CASE__ , )
return ds, max_tokens, tokenizer
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Tuple:
a_ , a_ , a_ : Optional[int] = self._get_dataset()
a_ : Optional[Any] = set(DistributedSortishSampler(SCREAMING_SNAKE_CASE__ , 2_5_6 , num_replicas=2 , rank=0 , add_extra_examples=SCREAMING_SNAKE_CASE__ ) )
a_ : List[Any] = set(DistributedSortishSampler(SCREAMING_SNAKE_CASE__ , 2_5_6 , num_replicas=2 , rank=1 , add_extra_examples=SCREAMING_SNAKE_CASE__ ) )
assert idsa.intersection(SCREAMING_SNAKE_CASE__ ) == set()
@parameterized.expand(
[
MBART_TINY,
MARIAN_TINY,
T5_TINY,
BART_TINY,
PEGASUS_XSUM,
] , )
def SCREAMING_SNAKE_CASE ( self : Tuple , SCREAMING_SNAKE_CASE__ : str ) -> str:
a_ : str = AutoTokenizer.from_pretrained(SCREAMING_SNAKE_CASE__ , use_fast=SCREAMING_SNAKE_CASE__ )
if tok_name == MBART_TINY:
a_ : Dict = SeqaSeqDataset(
SCREAMING_SNAKE_CASE__ , data_dir=make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) , type_path='train' , max_source_length=4 , max_target_length=8 , src_lang='EN' , tgt_lang='FR' , )
a_ : Dict = train_dataset.dataset_kwargs
assert "src_lang" in kwargs and "tgt_lang" in kwargs
else:
a_ : Any = SeqaSeqDataset(
SCREAMING_SNAKE_CASE__ , data_dir=make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) , type_path='train' , max_source_length=4 , max_target_length=8 , )
a_ : List[Any] = train_dataset.dataset_kwargs
assert "add_prefix_space" not in kwargs if tok_name != BART_TINY else "add_prefix_space" in kwargs
assert len(SCREAMING_SNAKE_CASE__ ) == 1 if tok_name == BART_TINY else len(SCREAMING_SNAKE_CASE__ ) == 0
| 32 |
'''simple docstring'''
import string
from math import log10
def term_frequency ( term : str , document : str ) -> int:
    '''simple docstring'''
    document_without_punctuation = document.translate(
        str.maketrans("" , "" , string.punctuation ) ).replace("\n" , "" )
    tokenize_document = document_without_punctuation.split(" " ) # word tokenization
    return len([word for word in tokenize_document if word.lower() == term.lower()] )
def document_frequency ( term : str , corpus : str ) -> tuple[int, int]:
    '''simple docstring'''
    corpus_without_punctuation = corpus.lower().translate(
        str.maketrans("" , "" , string.punctuation ) ) # strip all punctuation and replace it with ''
    docs = corpus_without_punctuation.split("\n" )
    term = term.lower()
    return (len([doc for doc in docs if term in doc] ), len(docs ))
def inverse_document_frequency ( n : int , df : int , smoothing : bool = False ) -> float:
    '''simple docstring'''
    if smoothing:
        if n == 0:
            raise ValueError("log10(0) is undefined." )
        return round(1 + log10(n / (1 + df) ) , 3 )
    if df == 0:
        raise ZeroDivisionError("df must be > 0" )
    elif n == 0:
        raise ValueError("log10(0) is undefined." )
    return round(log10(n / df ) , 3 )
def tf_idf ( tf : int , idf : int ) -> float:
    '''simple docstring'''
    return round(tf * idf , 3 )
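# Worked example (editorial, not part of the original module): a term occurring
# 3 times in a document and found in 2 of 10 corpus documents gives
# idf = round(log10(10 / 2), 3) = 0.699 and tf-idf = round(3 * 0.699, 3) = 2.097.
if __name__ == "__main__":
    print(tf_idf(3 , inverse_document_frequency(10 , 2 ) ) )  # 2.097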
| 22 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_tf_available,
is_torch_available,
)
__A : Optional[int] = {
'''configuration_speech_to_text''': ['''SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''Speech2TextConfig'''],
'''processing_speech_to_text''': ['''Speech2TextProcessor'''],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A : Tuple = ['''Speech2TextTokenizer''']
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A : List[str] = ['''Speech2TextFeatureExtractor''']
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A : List[Any] = [
'''TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFSpeech2TextForConditionalGeneration''',
'''TFSpeech2TextModel''',
'''TFSpeech2TextPreTrainedModel''',
]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A : List[Any] = [
'''SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''Speech2TextForConditionalGeneration''',
'''Speech2TextModel''',
'''Speech2TextPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_speech_to_text import SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, SpeechaTextConfig
from .processing_speech_to_text import SpeechaTextProcessor
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_speech_to_text import SpeechaTextTokenizer
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_speech_to_text import SpeechaTextFeatureExtractor
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_speech_to_text import (
TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFSpeechaTextForConditionalGeneration,
TFSpeechaTextModel,
TFSpeechaTextPreTrainedModel,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_speech_to_text import (
SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
SpeechaTextForConditionalGeneration,
SpeechaTextModel,
SpeechaTextPreTrainedModel,
)
else:
import sys
__A : int = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 33 |
'''simple docstring'''
from ..utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_scipy_available,
is_torch_available,
is_torchsde_available,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_pt_objects import * # noqa F403
else:
from .scheduling_consistency_models import CMStochasticIterativeScheduler
from .scheduling_ddim import DDIMScheduler
from .scheduling_ddim_inverse import DDIMInverseScheduler
from .scheduling_ddim_parallel import DDIMParallelScheduler
from .scheduling_ddpm import DDPMScheduler
from .scheduling_ddpm_parallel import DDPMParallelScheduler
from .scheduling_deis_multistep import DEISMultistepScheduler
from .scheduling_dpmsolver_multistep import DPMSolverMultistepScheduler
from .scheduling_dpmsolver_multistep_inverse import DPMSolverMultistepInverseScheduler
from .scheduling_dpmsolver_singlestep import DPMSolverSinglestepScheduler
from .scheduling_euler_ancestral_discrete import EulerAncestralDiscreteScheduler
from .scheduling_euler_discrete import EulerDiscreteScheduler
from .scheduling_heun_discrete import HeunDiscreteScheduler
from .scheduling_ipndm import IPNDMScheduler
from .scheduling_k_dpm_2_ancestral_discrete import KDPMaAncestralDiscreteScheduler
from .scheduling_k_dpm_2_discrete import KDPMaDiscreteScheduler
from .scheduling_karras_ve import KarrasVeScheduler
from .scheduling_pndm import PNDMScheduler
from .scheduling_repaint import RePaintScheduler
from .scheduling_sde_ve import ScoreSdeVeScheduler
from .scheduling_sde_vp import ScoreSdeVpScheduler
from .scheduling_unclip import UnCLIPScheduler
from .scheduling_unipc_multistep import UniPCMultistepScheduler
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin
from .scheduling_vq_diffusion import VQDiffusionScheduler
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_flax_objects import * # noqa F403
else:
from .scheduling_ddim_flax import FlaxDDIMScheduler
from .scheduling_ddpm_flax import FlaxDDPMScheduler
from .scheduling_dpmsolver_multistep_flax import FlaxDPMSolverMultistepScheduler
from .scheduling_karras_ve_flax import FlaxKarrasVeScheduler
from .scheduling_lms_discrete_flax import FlaxLMSDiscreteScheduler
from .scheduling_pndm_flax import FlaxPNDMScheduler
from .scheduling_sde_ve_flax import FlaxScoreSdeVeScheduler
from .scheduling_utils_flax import (
FlaxKarrasDiffusionSchedulers,
FlaxSchedulerMixin,
FlaxSchedulerOutput,
broadcast_to_shape_from_left,
)
try:
if not (is_torch_available() and is_scipy_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_torch_and_scipy_objects import * # noqa F403
else:
from .scheduling_lms_discrete import LMSDiscreteScheduler
try:
if not (is_torch_available() and is_torchsde_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_torch_and_torchsde_objects import * # noqa F403
else:
from .scheduling_dpmsolver_sde import DPMSolverSDEScheduler
| 22 | 0 |
'''simple docstring'''
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import numpy as np
from utils_multiple_choice import MultipleChoiceDataset, Split, processors
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
logger = logging.getLogger(__name__)
def simple_accuracy (preds : Dict , labels : Union[str, Any] ):
    return (preds == labels).mean()
@dataclass
class ModelArguments :
    model_name_or_path : str = field(
        metadata={"""help""": """Path to pretrained model or model identifier from huggingface.co/models"""} )
    config_name : Optional[str] = field(
        default=None , metadata={"""help""": """Pretrained config name or path if not the same as model_name"""} )
    tokenizer_name : Optional[str] = field(
        default=None , metadata={"""help""": """Pretrained tokenizer name or path if not the same as model_name"""} )
    cache_dir : Optional[str] = field(
        default=None , metadata={"""help""": """Where do you want to store the pretrained models downloaded from huggingface.co"""} , )
@dataclass
class DataTrainingArguments :
    task_name : str = field(metadata={"""help""": """The name of the task to train on: """ + """, """.join(processors.keys() )} )
    data_dir : str = field(metadata={"""help""": """Should contain the data files for the task."""} )
    max_seq_length : int = field(
        default=128 , metadata={
            """help""": (
                """The maximum total input sequence length after tokenization. Sequences longer """
                """than this will be truncated, sequences shorter will be padded."""
            )
        } , )
    overwrite_cache : bool = field(
        default=False , metadata={"""help""": """Overwrite the cached training and evaluation sets"""} )
def main ():
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
    model_args , data_args , training_args = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
F"Output directory ({training_args.output_dir}) already exists and is not empty. Use"
''' --overwrite_output_dir to overcome.''' )
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
logger.warning(
'''Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s''' , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fpaa , )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info('''Training/evaluation parameters %s''' , _a )
# Set seed
set_seed(training_args.seed )
try:
        processor = processors[data_args.task_name]()
        label_list = processor.get_labels()
        num_labels = len(label_list )
except KeyError:
raise ValueError('''Task not found: %s''' % (data_args.task_name) )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=num_labels , finetuning_task=data_args.task_name , cache_dir=model_args.cache_dir , )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
    model = AutoModelForMultipleChoice.from_pretrained(
        model_args.model_name_or_path , from_tf=bool('''.ckpt''' in model_args.model_name_or_path ) , config=config , cache_dir=model_args.cache_dir , )
    # Get datasets
    train_dataset = (
        MultipleChoiceDataset(
            data_dir=data_args.data_dir , tokenizer=tokenizer , task=data_args.task_name , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.train , )
        if training_args.do_train
        else None
    )
    eval_dataset = (
        MultipleChoiceDataset(
            data_dir=data_args.data_dir , tokenizer=tokenizer , task=data_args.task_name , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.dev , )
        if training_args.do_eval
        else None
    )
    def compute_metrics(p : EvalPrediction ) -> Dict:
        preds = np.argmax(p.predictions , axis=1 )
        return {"acc": simple_accuracy(preds , p.label_ids )}
# Data collator
    data_collator = DataCollatorWithPadding(tokenizer , pad_to_multiple_of=8 ) if training_args.fp16 else None
# Initialize our Trainer
    trainer = Trainer(
        model=model , args=training_args , train_dataset=train_dataset , eval_dataset=eval_dataset , compute_metrics=compute_metrics , data_collator=data_collator , )
# Training
if training_args.do_train:
trainer.train(
model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_master():
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
    results = {}
if training_args.do_eval:
logger.info('''*** Evaluate ***''' )
        result = trainer.evaluate()
        output_eval_file = os.path.join(training_args.output_dir , '''eval_results.txt''' )
        if trainer.is_world_master():
            with open(output_eval_file , '''w''' ) as writer:
logger.info('''***** Eval results *****''' )
for key, value in result.items():
                    logger.info(''' %s = %s''' , key , value )
writer.write('''%s = %s\n''' % (key, value) )
        results.update(result )
return results
def _mp_fn (index : Optional[int] ):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
| 34 |
'''simple docstring'''
def UpperCAmelCase_ ( number : int ) -> int:
    '''simple docstring'''
    if not isinstance(number , int ) or number < 0:
        raise ValueError("Input must be a non-negative integer" )
    count = 0
    while number:
        # This way we arrive at next set bit (next 1) instead of looping
        # through each bit and checking for 1s hence the
        # loop won't run 32 times it will only run the number of `1` times
        number &= number - 1
        count += 1
    return count
if __name__ == "__main__":
import doctest
doctest.testmod()
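    # Added check (editorial, not in the original file): Kernighan's trick clears the
    # lowest set bit on each iteration, so 0b1011 == 11 takes exactly three iterations.
    assert UpperCAmelCase_(11 ) == 3
    assert UpperCAmelCase_(0 ) == 0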
| 22 | 0 |
'''simple docstring'''
from pickle import UnpicklingError
import jax
import jax.numpy as jnp
import numpy as np
from flax.serialization import from_bytes
from flax.traverse_util import flatten_dict
from ..utils import logging
logger = logging.get_logger(__name__)
def load_flax_checkpoint_in_pytorch_model( pt_model , model_file ) -> Optional[int]:
    try:
        with open(model_file , """rb""" ) as flax_state_f:
            flax_state = from_bytes(None , flax_state_f.read() )
    except UnpicklingError as e:
        try:
            with open(model_file ) as f:
                if f.read().startswith("""version""" ):
                    raise OSError(
                        """You seem to have cloned a repository without having git-lfs installed. Please"""
                        """ install git-lfs and run `git lfs install` followed by `git lfs pull` in the"""
                        """ folder you cloned.""" )
                else:
                    raise ValueError from e
        except (UnicodeDecodeError, ValueError):
            raise EnvironmentError(f"Unable to convert {model_file} to Flax deserializable object. " )
    return load_flax_weights_in_pytorch_model(pt_model , flax_state )
def load_flax_weights_in_pytorch_model( pt_model , flax_state ) -> int:
    try:
        import torch # noqa: F401
    except ImportError:
        logger.error(
            """Loading Flax weights in PyTorch requires both PyTorch and Flax to be installed. Please see"""
            """ https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation"""
            """ instructions.""" )
        raise
    # check if we have bf16 weights
    is_type_bf16 = flatten_dict(jax.tree_util.tree_map(lambda x : x.dtype == jnp.bfloat16 , flax_state ) ).values()
    if any(is_type_bf16 ):
        # convert all weights to fp32 if they are bf16 since torch.from_numpy can-not handle bf16
        # and bf16 is not fully supported in PT yet.
        logger.warning(
            """Found ``bfloat16`` weights in Flax model. Casting all ``bfloat16`` weights to ``float32`` """
            """before loading those in PyTorch model.""" )
        flax_state = jax.tree_util.tree_map(
            lambda params : params.astype(np.float32 ) if params.dtype == jnp.bfloat16 else params , flax_state )
    pt_model.base_model_prefix = """"""
    flax_state_dict = flatten_dict(flax_state , sep=""".""" )
    pt_model_dict = pt_model.state_dict()
    # keep track of unexpected & missing keys
    unexpected_keys = []
    missing_keys = set(pt_model_dict.keys() )
    for flax_key_tuple, flax_tensor in flax_state_dict.items():
        flax_key_tuple_array = flax_key_tuple.split(""".""" )
        if flax_key_tuple_array[-1] == "kernel" and flax_tensor.ndim == 4:
            flax_key_tuple_array = flax_key_tuple_array[:-1] + ["""weight"""]
            flax_tensor = jnp.transpose(flax_tensor , (3, 2, 0, 1) )
        elif flax_key_tuple_array[-1] == "kernel":
            flax_key_tuple_array = flax_key_tuple_array[:-1] + ["""weight"""]
            flax_tensor = flax_tensor.T
        elif flax_key_tuple_array[-1] == "scale":
            flax_key_tuple_array = flax_key_tuple_array[:-1] + ["""weight"""]
        if "time_embedding" not in flax_key_tuple_array:
            for i, flax_key_tuple_string in enumerate(flax_key_tuple_array ):
                flax_key_tuple_array[i] = (
                    flax_key_tuple_string.replace("""_0""" , """.0""" )
                    .replace("""_1""" , """.1""" )
                    .replace("""_2""" , """.2""" )
                    .replace("""_3""" , """.3""" )
                    .replace("""_4""" , """.4""" )
                    .replace("""_5""" , """.5""" )
                    .replace("""_6""" , """.6""" )
                    .replace("""_7""" , """.7""" )
                    .replace("""_8""" , """.8""" )
                    .replace("""_9""" , """.9""" )
                )
        flax_key = """.""".join(flax_key_tuple_array )
        if flax_key in pt_model_dict:
            if flax_tensor.shape != pt_model_dict[flax_key].shape:
                raise ValueError(
                    f"Flax checkpoint seems to be incorrect. Weight {flax_key_tuple} was expected "
                    f"to be of shape {pt_model_dict[flax_key].shape}, but is {flax_tensor.shape}." )
            else:
                # add weight to pytorch dict
                flax_tensor = np.asarray(flax_tensor ) if not isinstance(flax_tensor , np.ndarray ) else flax_tensor
                pt_model_dict[flax_key] = torch.from_numpy(flax_tensor )
                # remove from missing keys
                missing_keys.remove(flax_key )
        else:
            # weight is not expected by PyTorch model
            unexpected_keys.append(flax_key )
    pt_model.load_state_dict(pt_model_dict )
    # re-transform missing_keys to list
    missing_keys = list(missing_keys )
    if len(unexpected_keys ) > 0:
        logger.warning(
            """Some weights of the Flax model were not used when initializing the PyTorch model"""
            f" {pt_model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are initializing"
            f" {pt_model.__class__.__name__} from a Flax model trained on another task or with another architecture"
            """ (e.g. initializing a BertForSequenceClassification model from a FlaxBertForPreTraining model).\n- This"""
            f" IS NOT expected if you are initializing {pt_model.__class__.__name__} from a Flax model that you expect"
            """ to be exactly identical (e.g. initializing a BertForSequenceClassification model from a"""
            """ FlaxBertForSequenceClassification model).""" )
    if len(missing_keys ) > 0:
        logger.warning(
            f"Some weights of {pt_model.__class__.__name__} were not initialized from the Flax model and are newly"
            f" initialized: {missing_keys}\nYou should probably TRAIN this model on a down-stream task to be able to"
            """ use it for predictions and inference.""" )
    return pt_model
| 35 |
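# Editor's note: the two layout conventions handled above, as a NumPy-only sketch.
# A Flax Dense kernel is (in_features, out_features) while torch.nn.Linear.weight
# is (out_features, in_features), hence the `.T`; 4-D conv kernels are
# (H, W, in, out) in Flax versus (out, in, H, W) in PyTorch, hence (3, 2, 0, 1).
import numpy as np

dense_kernel = np.zeros((4, 8), dtype=np.float32)        # Flax: (in, out)
linear_weight = dense_kernel.T                           # PyTorch: (out, in)
assert linear_weight.shape == (8, 4)

conv_kernel = np.zeros((3, 3, 4, 8), dtype=np.float32)   # Flax: (H, W, in, out)
conv_weight = np.transpose(conv_kernel, (3, 2, 0, 1))    # PyTorch: (out, in, H, W)
assert conv_weight.shape == (8, 4, 3, 3)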
'''simple docstring'''
from __future__ import annotations
from collections.abc import Callable
from typing import Any, Generic, TypeVar
__SCREAMING_SNAKE_CASE :Optional[int] = TypeVar('''T''')
class A_ ( Generic[T] ):
    def __init__( self , arr : list[T] , fnc : Callable[[T, T], T] ):
        self.N = len(arr )
        self.st = [None for _ in range(self.N )] + arr  # 1-indexed tree, leaves at N..2N-1
        self.fn = fnc
        self.build()
    def build( self ):
        for p in range(self.N - 1 , 0 , -1 ):
            self.st[p] = self.fn(self.st[p * 2] , self.st[p * 2 + 1] )
    def update( self , p : int , v : T ):
        p += self.N
        self.st[p] = v
        while p > 1:
            p = p // 2
            self.st[p] = self.fn(self.st[p * 2] , self.st[p * 2 + 1] )
    def query( self , l : int , r : int ):  # noqa: E741
        l, r = l + self.N, r + self.N
        res = None
        while l <= r:
            if l % 2 == 1:
                res = self.st[l] if res is None else self.fn(res , self.st[l] )
            if r % 2 == 0:
                res = self.st[r] if res is None else self.fn(res , self.st[r] )
            l, r = (l + 1) // 2, (r - 1) // 2
        return res
if __name__ == "__main__":
from functools import reduce
__SCREAMING_SNAKE_CASE :Union[str, Any] = [1, 10, -2, 9, -3, 8, 4, -7, 5, 6, 11, -12]
__SCREAMING_SNAKE_CASE :List[str] = {
0: 7,
1: 2,
2: 6,
3: -14,
4: 5,
5: 4,
6: 7,
7: -10,
8: 9,
9: 10,
10: 12,
11: 1,
}
__SCREAMING_SNAKE_CASE :Any = SegmentTree(test_array, min)
__SCREAMING_SNAKE_CASE :Any = SegmentTree(test_array, max)
__SCREAMING_SNAKE_CASE :Any = SegmentTree(test_array, lambda a, b: a + b)
def UpperCAmelCase_ ( ) -> None:
    '''simple docstring'''
    for i in range(len(test_array ) ):
        for j in range(i , len(test_array ) ):
            min_range = reduce(min , test_array[i : j + 1] )
            max_range = reduce(max , test_array[i : j + 1] )
            sum_range = reduce(lambda a , b : a + b , test_array[i : j + 1] )
            assert min_range == min_segment_tree.query(i , j )
            assert max_range == max_segment_tree.query(i , j )
            assert sum_range == sum_segment_tree.query(i , j )
test_all_segments()
for index, value in test_updates.items():
__SCREAMING_SNAKE_CASE :str = value
min_segment_tree.update(index, value)
max_segment_tree.update(index, value)
sum_segment_tree.update(index, value)
test_all_segments()
| 22 | 0 |
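# Editor's note: a short usage sketch of the SegmentTree class above, beyond the
# exhaustive self-test: build with any associative combiner, then point-update
# and range-query in O(log n).
st = SegmentTree([5, 2, 8, 1], min)
assert st.query(0, 3) == 1    # min over the whole array
st.update(3, 9)               # arr[3] = 9
assert st.query(0, 3) == 2
assert st.query(2, 2) == 8    # single-element range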
from maths.prime_factors import prime_factors
def A ( _lowerCamelCase ):
'''simple docstring'''
    if not isinstance(_lowerCamelCase , int ):
        _lowerCAmelCase : int = F"Input value of [number={_lowerCamelCase}] must be an integer"
        raise TypeError(_lowerCAmelCase )
    if _lowerCamelCase < 1:
raise ValueError("Input must be a positive integer" )
return -1 if len(prime_factors(_lowerCamelCase ) ) % 2 else 1
if __name__ == "__main__":
import doctest
doctest.testmod()
| 36 |
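# Editor's note: for reference, the full Moebius function also returns 0 for
# inputs that are not square-free; a self-contained trial-division sketch,
# independent of the `maths.prime_factors` helper used above.
def mobius(n: int) -> int:
    if n < 1:
        raise ValueError("Input must be a positive integer")
    result, p = 1, 2
    while p * p <= n:
        if n % p == 0:
            n //= p
            if n % p == 0:  # squared prime factor, so mu(n) = 0
                return 0
            result = -result
        p += 1
    if n > 1:  # one prime factor left over
        result = -result
    return result

assert [mobius(k) for k in range(1, 11)] == [1, -1, -1, 0, -1, 1, -1, 0, 0, 1]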
'''simple docstring'''
import pytest
from datasets.utils.sharding import _distribute_shards, _number_of_shards_in_gen_kwargs, _split_gen_kwargs
@pytest.mark.parametrize(
"kwargs, expected" , [
({"num_shards": 0, "max_num_jobs": 1}, []),
({"num_shards": 10, "max_num_jobs": 1}, [range(10 )]),
({"num_shards": 10, "max_num_jobs": 10}, [range(__lowercase , i + 1 ) for i in range(10 )]),
({"num_shards": 1, "max_num_jobs": 10}, [range(1 )]),
({"num_shards": 10, "max_num_jobs": 3}, [range(0 , 4 ), range(4 , 7 ), range(7 , 10 )]),
({"num_shards": 3, "max_num_jobs": 10}, [range(0 , 1 ), range(1 , 2 ), range(2 , 3 )]),
] , )
def UpperCAmelCase_ ( __lowercase : int , __lowercase : Dict ) -> List[Any]:
'''simple docstring'''
_UpperCAmelCase = _distribute_shards(**__lowercase )
assert out == expected
@pytest.mark.parametrize(
"gen_kwargs, max_num_jobs, expected" , [
({"foo": 0}, 10, [{"foo": 0}]),
({"shards": [0, 1, 2, 3]}, 1, [{"shards": [0, 1, 2, 3]}]),
({"shards": [0, 1, 2, 3]}, 4, [{"shards": [0]}, {"shards": [1]}, {"shards": [2]}, {"shards": [3]}]),
({"shards": [0, 1]}, 4, [{"shards": [0]}, {"shards": [1]}]),
({"shards": [0, 1, 2, 3]}, 2, [{"shards": [0, 1]}, {"shards": [2, 3]}]),
] , )
def UpperCAmelCase_ ( __lowercase : Dict , __lowercase : Optional[Any] , __lowercase : int ) -> str:
'''simple docstring'''
_UpperCAmelCase = _split_gen_kwargs(__lowercase , __lowercase )
assert out == expected
@pytest.mark.parametrize(
"gen_kwargs, expected" , [
({"foo": 0}, 1),
({"shards": [0]}, 1),
({"shards": [0, 1, 2, 3]}, 4),
({"shards": [0, 1, 2, 3], "foo": 0}, 4),
({"shards": [0, 1, 2, 3], "other": (0, 1)}, 4),
({"shards": [0, 1, 2, 3], "shards2": [0, 1]}, RuntimeError),
] , )
def UpperCAmelCase_ ( __lowercase : Optional[Any] , __lowercase : List[Any] ) -> List[Any]:
'''simple docstring'''
if expected is RuntimeError:
with pytest.raises(__lowercase ):
_number_of_shards_in_gen_kwargs(__lowercase )
else:
_UpperCAmelCase = _number_of_shards_in_gen_kwargs(__lowercase )
assert out == expected
| 22 | 0 |
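# Editor's note: the contiguous-split behavior exercised above can be reproduced
# in a few lines. This is an illustrative reimplementation for the reader, not
# the `datasets` internal.
def distribute_shards(num_shards: int, max_num_jobs: int) -> list:
    """Split `num_shards` into at most `max_num_jobs` contiguous ranges."""
    num_jobs = min(num_shards, max_num_jobs)
    per_job, remainder = divmod(num_shards, num_jobs) if num_jobs else (0, 0)
    out, start = [], 0
    for job in range(num_jobs):
        size = per_job + (1 if job < remainder else 0)
        out.append(range(start, start + size))
        start += size
    return out

assert distribute_shards(10, 3) == [range(0, 4), range(4, 7), range(7, 10)]
assert distribute_shards(1, 10) == [range(0, 1)]
assert distribute_shards(0, 1) == []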
'''simple docstring'''
import json
import os
from pathlib import Path
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple, Union
import sentencepiece
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
_lowerCAmelCase = logging.get_logger(__name__)
_lowerCAmelCase = '''▁'''
_lowerCAmelCase = {
'''vocab_file''': '''vocab.json''',
'''spm_file''': '''sentencepiece.bpe.model''',
}
_lowerCAmelCase = {
'''vocab_file''': {
'''facebook/s2t-small-librispeech-asr''': (
'''https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/vocab.json'''
),
},
'''spm_file''': {
'''facebook/s2t-small-librispeech-asr''': (
'''https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/sentencepiece.bpe.model'''
)
},
}
_lowerCAmelCase = {
'''facebook/s2t-small-librispeech-asr''': 1024,
}
_lowerCAmelCase = ['''pt''', '''fr''', '''ru''', '''nl''', '''ro''', '''it''', '''es''', '''de''']
_lowerCAmelCase = {'''mustc''': MUSTC_LANGS}
class lowerCAmelCase_( SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
__lowercase : Union[str, Any] = VOCAB_FILES_NAMES
__lowercase : List[str] = PRETRAINED_VOCAB_FILES_MAP
__lowercase : Optional[Any] = MAX_MODEL_INPUT_SIZES
__lowercase : Any = ['''input_ids''', '''attention_mask''']
__lowercase : List[int] = []
def __init__( self ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase="<s>" ,__UpperCAmelCase="</s>" ,__UpperCAmelCase="<pad>" ,__UpperCAmelCase="<unk>" ,__UpperCAmelCase=False ,__UpperCAmelCase=False ,__UpperCAmelCase=None ,__UpperCAmelCase=None ,__UpperCAmelCase = None ,**__UpperCAmelCase ,) -> None:
lowerCAmelCase__ : Optional[Any] = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=__UpperCAmelCase ,eos_token=__UpperCAmelCase ,unk_token=__UpperCAmelCase ,pad_token=__UpperCAmelCase ,do_upper_case=__UpperCAmelCase ,do_lower_case=__UpperCAmelCase ,tgt_lang=__UpperCAmelCase ,lang_codes=__UpperCAmelCase ,sp_model_kwargs=self.sp_model_kwargs ,**__UpperCAmelCase ,)
lowerCAmelCase__ : List[str] = do_upper_case
lowerCAmelCase__ : List[Any] = do_lower_case
lowerCAmelCase__ : Union[str, Any] = load_json(__UpperCAmelCase )
lowerCAmelCase__ : List[Any] = {v: k for k, v in self.encoder.items()}
lowerCAmelCase__ : List[Any] = spm_file
lowerCAmelCase__ : str = load_spm(__UpperCAmelCase ,self.sp_model_kwargs )
if lang_codes is not None:
lowerCAmelCase__ : Union[str, Any] = lang_codes
lowerCAmelCase__ : List[Any] = LANGUAGES[lang_codes]
lowerCAmelCase__ : List[str] = [F"""<lang:{lang}>""" for lang in self.langs]
lowerCAmelCase__ : Any = {lang: self.sp_model.PieceToId(F"""<lang:{lang}>""" ) for lang in self.langs}
lowerCAmelCase__ : List[str] = self.lang_tokens
lowerCAmelCase__ : Tuple = tgt_lang if tgt_lang is not None else self.langs[0]
self.set_tgt_lang_special_tokens(self._tgt_lang )
else:
lowerCAmelCase__ : List[Any] = {}
@property
def UpperCAmelCase_ ( self ) -> int:
return len(self.encoder )
@property
def UpperCAmelCase_ ( self ) -> str:
return self._tgt_lang
@tgt_lang.setter
def UpperCAmelCase_ ( self ,__UpperCAmelCase ) -> None:
lowerCAmelCase__ : Union[str, Any] = new_tgt_lang
self.set_tgt_lang_special_tokens(__UpperCAmelCase )
def UpperCAmelCase_ ( self ,__UpperCAmelCase ) -> None:
lowerCAmelCase__ : str = self.lang_code_to_id[tgt_lang]
lowerCAmelCase__ : int = [lang_code_id]
def UpperCAmelCase_ ( self ,__UpperCAmelCase ) -> List[str]:
return self.sp_model.encode(__UpperCAmelCase ,out_type=__UpperCAmelCase )
def UpperCAmelCase_ ( self ,__UpperCAmelCase ) -> str:
return self.encoder.get(__UpperCAmelCase ,self.encoder[self.unk_token] )
def UpperCAmelCase_ ( self ,__UpperCAmelCase ) -> str:
return self.decoder.get(__UpperCAmelCase ,self.unk_token )
def UpperCAmelCase_ ( self ,__UpperCAmelCase ) -> str:
lowerCAmelCase__ : List[Any] = []
lowerCAmelCase__ : Dict = """"""
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
lowerCAmelCase__ : Any = self.sp_model.decode(__UpperCAmelCase )
out_string += (decoded.upper() if self.do_upper_case else decoded) + token + " "
lowerCAmelCase__ : Any = []
else:
current_sub_tokens.append(__UpperCAmelCase )
lowerCAmelCase__ : Union[str, Any] = self.sp_model.decode(__UpperCAmelCase )
out_string += decoded.upper() if self.do_upper_case else decoded
return out_string.strip()
def UpperCAmelCase_ ( self ,__UpperCAmelCase ,__UpperCAmelCase=None ) -> List[int]:
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + [self.eos_token_id]
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + [self.eos_token_id]
def UpperCAmelCase_ ( self ,__UpperCAmelCase ,__UpperCAmelCase = None ,__UpperCAmelCase = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__UpperCAmelCase ,token_ids_a=__UpperCAmelCase ,already_has_special_tokens=__UpperCAmelCase )
lowerCAmelCase__ : Any = [1] * len(self.prefix_tokens )
lowerCAmelCase__ : int = [1]
if token_ids_a is None:
return prefix_ones + ([0] * len(__UpperCAmelCase )) + suffix_ones
return prefix_ones + ([0] * len(__UpperCAmelCase )) + ([0] * len(__UpperCAmelCase )) + suffix_ones
def UpperCAmelCase_ ( self ) -> Dict:
lowerCAmelCase__ : int = self.encoder.copy()
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self ) -> Dict:
lowerCAmelCase__ : Optional[Any] = self.__dict__.copy()
lowerCAmelCase__ : str = None
return state
def __setstate__( self ,__UpperCAmelCase ) -> None:
lowerCAmelCase__ : List[Any] = d
# for backward compatibility
if not hasattr(self ,"""sp_model_kwargs""" ):
lowerCAmelCase__ : Optional[Any] = {}
lowerCAmelCase__ : Optional[Any] = load_spm(self.spm_file ,self.sp_model_kwargs )
def UpperCAmelCase_ ( self ,__UpperCAmelCase ,__UpperCAmelCase = None ) -> Tuple[str]:
lowerCAmelCase__ : Optional[int] = Path(__UpperCAmelCase )
assert save_dir.is_dir(), F"""{save_directory} should be a directory"""
lowerCAmelCase__ : Dict = save_dir / (
(filename_prefix + """-""" if filename_prefix else """""") + self.vocab_files_names["""vocab_file"""]
)
lowerCAmelCase__ : int = save_dir / (
(filename_prefix + """-""" if filename_prefix else """""") + self.vocab_files_names["""spm_file"""]
)
save_json(self.encoder ,__UpperCAmelCase )
if os.path.abspath(self.spm_file ) != os.path.abspath(__UpperCAmelCase ) and os.path.isfile(self.spm_file ):
copyfile(self.spm_file ,__UpperCAmelCase )
elif not os.path.isfile(self.spm_file ):
with open(__UpperCAmelCase ,"""wb""" ) as fi:
lowerCAmelCase__ : Union[str, Any] = self.sp_model.serialized_model_proto()
fi.write(__UpperCAmelCase )
return (str(__UpperCAmelCase ), str(__UpperCAmelCase ))
def load_spm( path , sp_model_kwargs ):
    """simple docstring"""
    spm = sentencepiece.SentencePieceProcessor(**sp_model_kwargs )
    spm.Load(str(path ) )
    return spm
def load_json( path ):
    """simple docstring"""
    with open(path , """r""" ) as f:
        return json.load(f )
def save_json( data , path ):
    """simple docstring"""
    with open(path , """w""" ) as f:
        json.dump(data , f , indent=2 )
| 37 |
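# Editor's note: a hedged usage sketch of the tokenizer defined above (assumes
# network access to the Hub plus the `sentencepiece` package; the checkpoint
# name is the one referenced in the vocab maps above).
from transformers import Speech2TextTokenizer

tok = Speech2TextTokenizer.from_pretrained("facebook/s2t-small-librispeech-asr")
ids = tok("hello world").input_ids  # ends with the </s> (eos) token id
text = tok.decode(ids, skip_special_tokens=True)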
'''simple docstring'''
import math
def perfect_square( num : int ) -> bool:
    '''simple docstring'''
    return math.sqrt(num ) * math.sqrt(num ) == num
def perfect_square_binary_search( n : int ) -> bool:
    '''simple docstring'''
    left = 0
    right = n
    while left <= right:
        mid = (left + right) // 2
        if mid**2 == n:
            return True
        elif mid**2 > n:
            right = mid - 1
        else:
            left = mid + 1
    return False
if __name__ == "__main__":
import doctest
doctest.testmod()
| 22 | 0 |
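# Editor's note: a quick cross-check of the idea above. The float `math.sqrt`
# version can misbehave for very large integers, which is what the binary-search
# variant avoids; `math.isqrt` (Python 3.8+) is the exact stdlib reference.
import math

def is_perfect_square(n: int) -> bool:
    return n >= 0 and math.isqrt(n) ** 2 == n

assert [k for k in range(50) if is_perfect_square(k)] == [0, 1, 4, 9, 16, 25, 36, 49]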
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import OwlViTImageProcessor, OwlViTProcessor
@require_vision
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
def _A ( self : List[str] ):
UpperCamelCase :int = tempfile.mkdtemp()
# fmt: off
UpperCamelCase :str = ["""""", """l""", """o""", """w""", """e""", """r""", """s""", """t""", """i""", """d""", """n""", """lo""", """l</w>""", """w</w>""", """r</w>""", """t</w>""", """low</w>""", """er</w>""", """lowest</w>""", """newer</w>""", """wider""", """<unk>""", """<|startoftext|>""", """<|endoftext|>"""]
# fmt: on
UpperCamelCase :Optional[int] = dict(zip(__lowerCamelCase , range(len(__lowerCamelCase ) ) ) )
UpperCamelCase :List[str] = ["""#version: 0.2""", """l o""", """lo w</w>""", """e r</w>""", """"""]
UpperCamelCase :List[str] = {"""unk_token""": """<unk>"""}
UpperCamelCase :List[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
UpperCamelCase :Tuple = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write(json.dumps(__lowerCamelCase ) + """\n""" )
with open(self.merges_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write("""\n""".join(__lowerCamelCase ) )
UpperCamelCase :List[Any] = {
"""do_resize""": True,
"""size""": 20,
"""do_center_crop""": True,
"""crop_size""": 18,
"""do_normalize""": True,
"""image_mean""": [0.48145466, 0.4578275, 0.40821073],
"""image_std""": [0.26862954, 0.26130258, 0.27577711],
}
UpperCamelCase :Optional[Any] = os.path.join(self.tmpdirname , __lowerCamelCase )
with open(self.image_processor_file , """w""" , encoding="""utf-8""" ) as fp:
json.dump(__lowerCamelCase , __lowerCamelCase )
def _A ( self : Dict , **__lowerCamelCase : Dict ):
return CLIPTokenizer.from_pretrained(self.tmpdirname , pad_token="""!""" , **__lowerCamelCase )
def _A ( self : str , **__lowerCamelCase : Any ):
return CLIPTokenizerFast.from_pretrained(self.tmpdirname , pad_token="""!""" , **__lowerCamelCase )
def _A ( self : Tuple , **__lowerCamelCase : List[Any] ):
return OwlViTImageProcessor.from_pretrained(self.tmpdirname , **__lowerCamelCase )
def _A ( self : str ):
shutil.rmtree(self.tmpdirname )
def _A ( self : str ):
UpperCamelCase :Any = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
UpperCamelCase :Any = [Image.fromarray(np.moveaxis(__lowerCamelCase , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def _A ( self : str ):
UpperCamelCase :List[Any] = self.get_tokenizer()
UpperCamelCase :List[Any] = self.get_rust_tokenizer()
UpperCamelCase :Tuple = self.get_image_processor()
UpperCamelCase :Any = OwlViTProcessor(tokenizer=__lowerCamelCase , image_processor=__lowerCamelCase )
processor_slow.save_pretrained(self.tmpdirname )
UpperCamelCase :int = OwlViTProcessor.from_pretrained(self.tmpdirname , use_fast=__lowerCamelCase )
UpperCamelCase :Union[str, Any] = OwlViTProcessor(tokenizer=__lowerCamelCase , image_processor=__lowerCamelCase )
processor_fast.save_pretrained(self.tmpdirname )
UpperCamelCase :str = OwlViTProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer , __lowerCamelCase )
self.assertIsInstance(processor_fast.tokenizer , __lowerCamelCase )
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor , __lowerCamelCase )
self.assertIsInstance(processor_fast.image_processor , __lowerCamelCase )
def _A ( self : List[str] ):
UpperCamelCase :Optional[int] = OwlViTProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
UpperCamelCase :Any = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" )
UpperCamelCase :str = self.get_image_processor(do_normalize=__lowerCamelCase )
UpperCamelCase :str = OwlViTProcessor.from_pretrained(
self.tmpdirname , bos_token="""(BOS)""" , eos_token="""(EOS)""" , do_normalize=__lowerCamelCase )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , __lowerCamelCase )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , __lowerCamelCase )
def _A ( self : Union[str, Any] ):
UpperCamelCase :Union[str, Any] = self.get_image_processor()
UpperCamelCase :Dict = self.get_tokenizer()
UpperCamelCase :Optional[int] = OwlViTProcessor(tokenizer=__lowerCamelCase , image_processor=__lowerCamelCase )
UpperCamelCase :Dict = self.prepare_image_inputs()
UpperCamelCase :List[Any] = image_processor(__lowerCamelCase , return_tensors="""np""" )
UpperCamelCase :Dict = processor(images=__lowerCamelCase , return_tensors="""np""" )
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1E-2 )
def _A ( self : Dict ):
UpperCamelCase :List[Any] = self.get_image_processor()
UpperCamelCase :str = self.get_tokenizer()
UpperCamelCase :List[Any] = OwlViTProcessor(tokenizer=__lowerCamelCase , image_processor=__lowerCamelCase )
UpperCamelCase :Optional[Any] = """lower newer"""
UpperCamelCase :List[str] = processor(text=__lowerCamelCase , return_tensors="""np""" )
UpperCamelCase :List[str] = tokenizer(__lowerCamelCase , return_tensors="""np""" )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key][0].tolist() , encoded_processor[key][0].tolist() )
def _A ( self : Optional[int] ):
UpperCamelCase :List[str] = self.get_image_processor()
UpperCamelCase :Union[str, Any] = self.get_tokenizer()
UpperCamelCase :Any = OwlViTProcessor(tokenizer=__lowerCamelCase , image_processor=__lowerCamelCase )
UpperCamelCase :Optional[int] = """lower newer"""
UpperCamelCase :Tuple = self.prepare_image_inputs()
UpperCamelCase :int = processor(text=__lowerCamelCase , images=__lowerCamelCase )
self.assertListEqual(list(inputs.keys() ) , ["""input_ids""", """attention_mask""", """pixel_values"""] )
# test if it raises when no input is passed
with pytest.raises(__lowerCamelCase ):
processor()
def _A ( self : Any ):
UpperCamelCase :Optional[Any] = """google/owlvit-base-patch32"""
UpperCamelCase :List[Any] = OwlViTProcessor.from_pretrained(__lowerCamelCase )
UpperCamelCase :Union[str, Any] = ["""cat""", """nasa badge"""]
UpperCamelCase :str = processor(text=__lowerCamelCase )
UpperCamelCase :Optional[Any] = 16
self.assertListEqual(list(inputs.keys() ) , ["""input_ids""", """attention_mask"""] )
self.assertEqual(inputs["""input_ids"""].shape , (2, seq_length) )
# test if it raises when no input is passed
with pytest.raises(__lowerCamelCase ):
processor()
def _A ( self : List[str] ):
UpperCamelCase :Dict = """google/owlvit-base-patch32"""
UpperCamelCase :Dict = OwlViTProcessor.from_pretrained(__lowerCamelCase )
UpperCamelCase :Tuple = [["""cat""", """nasa badge"""], ["""person"""]]
UpperCamelCase :Union[str, Any] = processor(text=__lowerCamelCase )
UpperCamelCase :int = 16
UpperCamelCase :int = len(__lowerCamelCase )
UpperCamelCase :Dict = max([len(__lowerCamelCase ) for texts in input_texts] )
self.assertListEqual(list(inputs.keys() ) , ["""input_ids""", """attention_mask"""] )
self.assertEqual(inputs["""input_ids"""].shape , (batch_size * num_max_text_queries, seq_length) )
# test if it raises when no input is passed
with pytest.raises(__lowerCamelCase ):
processor()
def _A ( self : Any ):
UpperCamelCase :Optional[Any] = """google/owlvit-base-patch32"""
UpperCamelCase :List[Any] = OwlViTProcessor.from_pretrained(__lowerCamelCase )
UpperCamelCase :Optional[int] = ["""cat""", """nasa badge"""]
UpperCamelCase :Union[str, Any] = processor(text=__lowerCamelCase )
UpperCamelCase :str = 16
UpperCamelCase :Optional[int] = inputs["""input_ids"""]
UpperCamelCase :str = [
[49_406, 2_368, 49_407, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[49_406, 6_841, 11_301, 49_407, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
]
self.assertListEqual(list(inputs.keys() ) , ["""input_ids""", """attention_mask"""] )
self.assertEqual(inputs["""input_ids"""].shape , (2, seq_length) )
self.assertListEqual(list(input_ids[0] ) , predicted_ids[0] )
self.assertListEqual(list(input_ids[1] ) , predicted_ids[1] )
def _A ( self : str ):
UpperCamelCase :Union[str, Any] = self.get_image_processor()
UpperCamelCase :str = self.get_tokenizer()
UpperCamelCase :Optional[int] = OwlViTProcessor(tokenizer=__lowerCamelCase , image_processor=__lowerCamelCase )
UpperCamelCase :List[Any] = self.prepare_image_inputs()
UpperCamelCase :List[str] = self.prepare_image_inputs()
UpperCamelCase :Union[str, Any] = processor(images=__lowerCamelCase , query_images=__lowerCamelCase )
self.assertListEqual(list(inputs.keys() ) , ["""query_pixel_values""", """pixel_values"""] )
# test if it raises when no input is passed
with pytest.raises(__lowerCamelCase ):
processor()
def _A ( self : Dict ):
UpperCamelCase :Dict = self.get_image_processor()
UpperCamelCase :str = self.get_tokenizer()
UpperCamelCase :str = OwlViTProcessor(tokenizer=__lowerCamelCase , image_processor=__lowerCamelCase )
UpperCamelCase :str = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
UpperCamelCase :Any = processor.batch_decode(__lowerCamelCase )
UpperCamelCase :Optional[Any] = tokenizer.batch_decode(__lowerCamelCase )
self.assertListEqual(__lowerCamelCase , __lowerCamelCase )
| 38 |
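# Editor's note: a hedged end-to-end sketch of the processor tested above
# (assumes Hub access and that torch and Pillow are available).
import numpy as np
from PIL import Image
from transformers import OwlViTProcessor

processor = OwlViTProcessor.from_pretrained("google/owlvit-base-patch32")
image = Image.fromarray(np.zeros((224, 224, 3), dtype=np.uint8))
inputs = processor(text=[["a cat", "a dog"]], images=image, return_tensors="pt")
# inputs now holds input_ids, attention_mask and pixel_values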
'''simple docstring'''
import inspect
import tempfile
import unittest
from huggingface_hub import hf_hub_download
from transformers import is_torch_available
from transformers.testing_utils import is_flaky, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
__SCREAMING_SNAKE_CASE :Dict = 1e-4
if is_torch_available():
import torch
from transformers import AutoformerConfig, AutoformerForPrediction, AutoformerModel
from transformers.models.autoformer.modeling_autoformer import AutoformerDecoder, AutoformerEncoder
@require_torch
class A_ :
def __init__( self : List[Any] , snake_case_ : int , snake_case_ : Dict=1_6 , snake_case_ : Dict=1_3 , snake_case_ : int=7 , snake_case_ : Any=1_4 , snake_case_ : int=1_0 , snake_case_ : Any=1_9 , snake_case_ : int=5 , snake_case_ : Any=4 , snake_case_ : Tuple=True , snake_case_ : Optional[int]=1_6 , snake_case_ : List[str]=2 , snake_case_ : Any=4 , snake_case_ : List[Any]=4 , snake_case_ : Optional[Any]="gelu" , snake_case_ : Optional[int]=0.1 , snake_case_ : Union[str, Any]=0.1 , snake_case_ : Tuple=[1, 2, 3, 4, 5] , snake_case_ : str=2_5 , snake_case_ : Any=5 , ):
_UpperCAmelCase = d_model
_UpperCAmelCase = parent
_UpperCAmelCase = batch_size
_UpperCAmelCase = prediction_length
_UpperCAmelCase = context_length
_UpperCAmelCase = cardinality
_UpperCAmelCase = num_time_features
_UpperCAmelCase = lags_sequence
_UpperCAmelCase = embedding_dimension
_UpperCAmelCase = is_training
_UpperCAmelCase = hidden_size
_UpperCAmelCase = num_hidden_layers
_UpperCAmelCase = num_attention_heads
_UpperCAmelCase = intermediate_size
_UpperCAmelCase = hidden_act
_UpperCAmelCase = hidden_dropout_prob
_UpperCAmelCase = attention_probs_dropout_prob
_UpperCAmelCase = context_length
_UpperCAmelCase = prediction_length + label_length
_UpperCAmelCase = label_length
_UpperCAmelCase = moving_average
_UpperCAmelCase = autocorrelation_factor
def lowercase ( self : Union[str, Any] ):
return AutoformerConfig(
d_model=self.d_model , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , prediction_length=self.prediction_length , context_length=self.context_length , label_length=self.label_length , lags_sequence=self.lags_sequence , num_time_features=self.num_time_features , num_static_categorical_features=1 , cardinality=[self.cardinality] , embedding_dimension=[self.embedding_dimension] , moving_average=self.moving_average , )
def lowercase ( self : int , snake_case_ : Optional[Any] ):
_UpperCAmelCase = config.context_length + max(config.lags_sequence )
_UpperCAmelCase = ids_tensor([self.batch_size, 1] , config.cardinality[0] )
_UpperCAmelCase = floats_tensor([self.batch_size, _past_length, config.num_time_features] )
_UpperCAmelCase = floats_tensor([self.batch_size, _past_length] )
_UpperCAmelCase = floats_tensor([self.batch_size, _past_length] ) > 0.5
# decoder inputs
_UpperCAmelCase = floats_tensor([self.batch_size, config.prediction_length, config.num_time_features] )
_UpperCAmelCase = floats_tensor([self.batch_size, config.prediction_length] )
_UpperCAmelCase = {
"past_values": past_values,
"static_categorical_features": static_categorical_features,
"past_time_features": past_time_features,
"past_observed_mask": past_observed_mask,
"future_time_features": future_time_features,
"future_values": future_values,
}
return inputs_dict
def lowercase ( self : List[Any] ):
_UpperCAmelCase = self.get_config()
_UpperCAmelCase = self.prepare_autoformer_inputs_dict(snake_case_ )
return config, inputs_dict
def lowercase ( self : List[str] ):
_UpperCAmelCase , _UpperCAmelCase = self.prepare_config_and_inputs()
return config, inputs_dict
def lowercase ( self : Optional[Any] , snake_case_ : int , snake_case_ : Optional[int] ):
_UpperCAmelCase = AutoformerModel(config=snake_case_ ).to(snake_case_ ).eval()
_UpperCAmelCase = model(**snake_case_ )
_UpperCAmelCase = outputs.encoder_last_hidden_state
_UpperCAmelCase = outputs.last_hidden_state
with tempfile.TemporaryDirectory() as tmpdirname:
_UpperCAmelCase = model.get_encoder()
encoder.save_pretrained(snake_case_ )
_UpperCAmelCase = AutoformerEncoder.from_pretrained(snake_case_ ).to(snake_case_ )
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = model.create_network_inputs(**snake_case_ )
_UpperCAmelCase , _UpperCAmelCase = model.decomposition_layer(transformer_inputs[:, : config.context_length, ...] )
_UpperCAmelCase = torch.cat(
(transformer_inputs[:, : config.context_length, ...], feature[:, : config.context_length, ...]) , dim=-1 , )
_UpperCAmelCase = encoder(inputs_embeds=snake_case_ )[0]
self.parent.assertTrue((encoder_last_hidden_state_a - encoder_last_hidden_state).abs().max().item() < 1e-3 )
_UpperCAmelCase = (
torch.mean(transformer_inputs[:, : config.context_length, ...] , dim=1 )
.unsqueeze(1 )
.repeat(1 , config.prediction_length , 1 )
)
_UpperCAmelCase = torch.zeros(
[transformer_inputs.shape[0], config.prediction_length, transformer_inputs.shape[2]] , device=enc_input.device , )
_UpperCAmelCase = torch.cat(
(
torch.cat((seasonal_input[:, -config.label_length :, ...], zeros) , dim=1 ),
feature[:, config.context_length - config.label_length :, ...],
) , dim=-1 , )
_UpperCAmelCase = torch.cat(
(
torch.cat((trend_input[:, -config.label_length :, ...], mean) , dim=1 ),
feature[:, config.context_length - config.label_length :, ...],
) , dim=-1 , )
with tempfile.TemporaryDirectory() as tmpdirname:
_UpperCAmelCase = model.get_decoder()
decoder.save_pretrained(snake_case_ )
_UpperCAmelCase = AutoformerDecoder.from_pretrained(snake_case_ ).to(snake_case_ )
_UpperCAmelCase = decoder(
trend=snake_case_ , inputs_embeds=snake_case_ , encoder_hidden_states=snake_case_ , )[0]
self.parent.assertTrue((last_hidden_state_a - last_hidden_state).abs().max().item() < 1e-3 )
@require_torch
class A_ ( lowerCAmelCase_ , lowerCAmelCase_ , unittest.TestCase ):
_lowerCamelCase : List[Any] = (AutoformerModel, AutoformerForPrediction) if is_torch_available() else ()
_lowerCamelCase : Tuple = (AutoformerForPrediction,) if is_torch_available() else ()
_lowerCamelCase : List[Any] = {"""feature-extraction""": AutoformerModel} if is_torch_available() else {}
_lowerCamelCase : Optional[Any] = False
_lowerCamelCase : Tuple = False
_lowerCamelCase : int = False
_lowerCamelCase : Optional[Any] = False
_lowerCamelCase : Optional[Any] = False
_lowerCamelCase : List[Any] = False
def lowercase ( self : Tuple ):
_UpperCAmelCase = AutoformerModelTester(self )
_UpperCAmelCase = ConfigTester(self , config_class=snake_case_ , has_text_modality=snake_case_ )
def lowercase ( self : Optional[Any] ):
self.config_tester.run_common_tests()
def lowercase ( self : Union[str, Any] ):
_UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
_UpperCAmelCase = model_class(snake_case_ )
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(snake_case_ )
_UpperCAmelCase , _UpperCAmelCase = model_class.from_pretrained(snake_case_ , output_loading_info=snake_case_ )
self.assertEqual(info["missing_keys"] , [] )
def lowercase ( self : Optional[int] ):
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_encoder_decoder_model_standalone(*snake_case_ )
@unittest.skip(reason="Model has no tokens embeddings" )
def lowercase ( self : Optional[int] ):
pass
def lowercase ( self : Optional[int] ):
_UpperCAmelCase = inspect.signature(getattr(snake_case_ , "forward" ) )
# The main input is the name of the argument after `self`
_UpperCAmelCase = list(model_signature.parameters.keys() )[1]
self.assertEqual(AutoformerModel.main_input_name , snake_case_ )
def lowercase ( self : List[str] ):
_UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCAmelCase = model_class(snake_case_ )
_UpperCAmelCase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_UpperCAmelCase = [*signature.parameters.keys()]
_UpperCAmelCase = [
"past_values",
"past_time_features",
"past_observed_mask",
"static_categorical_features",
"static_real_features",
"future_values",
"future_time_features",
]
if model.__class__.__name__ in ["AutoformerForPrediction"]:
expected_arg_names.append("future_observed_mask" )
expected_arg_names.extend(
[
"decoder_attention_mask",
"head_mask",
"decoder_head_mask",
"cross_attn_head_mask",
"encoder_outputs",
"past_key_values",
"output_hidden_states",
"output_attentions",
"use_cache",
"return_dict",
] )
self.assertListEqual(arg_names[: len(snake_case_ )] , snake_case_ )
def lowercase ( self : Optional[int] ):
_UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
_UpperCAmelCase = True
_UpperCAmelCase = getattr(self.model_tester , "seq_length" , snake_case_ )
_UpperCAmelCase = getattr(self.model_tester , "decoder_seq_length" , snake_case_ )
_UpperCAmelCase = getattr(self.model_tester , "encoder_seq_length" , snake_case_ )
_UpperCAmelCase = getattr(self.model_tester , "d_model" , snake_case_ )
_UpperCAmelCase = getattr(self.model_tester , "num_attention_heads" , snake_case_ )
_UpperCAmelCase = d_model // num_attention_heads
for model_class in self.all_model_classes:
_UpperCAmelCase = True
_UpperCAmelCase = False
_UpperCAmelCase = True
_UpperCAmelCase = model_class(snake_case_ )
model.to(snake_case_ )
model.eval()
with torch.no_grad():
_UpperCAmelCase = model(**self._prepare_for_class(snake_case_ , snake_case_ ) )
_UpperCAmelCase = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(snake_case_ ) , self.model_tester.num_hidden_layers )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
_UpperCAmelCase = True
_UpperCAmelCase = model_class(snake_case_ )
model.to(snake_case_ )
model.eval()
with torch.no_grad():
_UpperCAmelCase = model(**self._prepare_for_class(snake_case_ , snake_case_ ) )
_UpperCAmelCase = outputs.encoder_attentions
self.assertEqual(len(snake_case_ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, dim] , )
_UpperCAmelCase = len(snake_case_ )
_UpperCAmelCase = 7
if "last_hidden_state" in outputs:
correct_outlen += 1
if "trend" in outputs:
correct_outlen += 1
if "past_key_values" in outputs:
correct_outlen += 1 # past_key_values have been returned
if "loss" in outputs:
correct_outlen += 1
if "params" in outputs:
correct_outlen += 1
self.assertEqual(snake_case_ , snake_case_ )
# decoder attentions
_UpperCAmelCase = outputs.decoder_attentions
self.assertIsInstance(snake_case_ , (list, tuple) )
self.assertEqual(len(snake_case_ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, decoder_seq_length, dim] , )
# cross attentions
_UpperCAmelCase = outputs.cross_attentions
self.assertIsInstance(snake_case_ , (list, tuple) )
self.assertEqual(len(snake_case_ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(cross_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, decoder_seq_length, dim] , )
# Check attention is always last and order is fine
_UpperCAmelCase = True
_UpperCAmelCase = True
_UpperCAmelCase = model_class(snake_case_ )
model.to(snake_case_ )
model.eval()
with torch.no_grad():
_UpperCAmelCase = model(**self._prepare_for_class(snake_case_ , snake_case_ ) )
self.assertEqual(out_len + 2 , len(snake_case_ ) )
_UpperCAmelCase = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(snake_case_ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, dim] , )
@is_flaky()
def lowercase ( self : Dict ):
super().test_retain_grad_hidden_states_attentions()
def UpperCAmelCase_ ( __lowercase : str="train-batch.pt" ) -> List[str]:
'''simple docstring'''
_UpperCAmelCase = hf_hub_download(repo_id="hf-internal-testing/tourism-monthly-batch" , filename=__lowercase , repo_type="dataset" )
_UpperCAmelCase = torch.load(__lowercase , map_location=__lowercase )
return batch
@require_torch
@slow
class A_ ( unittest.TestCase ):
def lowercase ( self : Optional[int] ):
_UpperCAmelCase = AutoformerModel.from_pretrained("huggingface/autoformer-tourism-monthly" ).to(snake_case_ )
_UpperCAmelCase = prepare_batch()
with torch.no_grad():
_UpperCAmelCase = model(
past_values=batch["past_values"] , past_time_features=batch["past_time_features"] , past_observed_mask=batch["past_observed_mask"] , static_categorical_features=batch["static_categorical_features"] , future_values=batch["future_values"] , future_time_features=batch["future_time_features"] , )[0]
_UpperCAmelCase = torch.Size(
(6_4, model.config.prediction_length + model.config.label_length, model.config.feature_size) )
self.assertEqual(output.shape , snake_case_ )
_UpperCAmelCase = torch.tensor(
[[0.3_5_9_3, -1.3_3_9_8, 0.6_3_3_0], [0.2_2_7_9, 1.5_3_9_6, -0.1_7_9_2], [0.0_4_5_0, 1.3_2_2_5, -0.2_3_3_5]] , device=snake_case_ )
self.assertTrue(torch.allclose(output[0, :3, :3] , snake_case_ , atol=snake_case_ ) )
def lowercase ( self : Optional[Any] ):
_UpperCAmelCase = AutoformerForPrediction.from_pretrained("huggingface/autoformer-tourism-monthly" ).to(snake_case_ )
_UpperCAmelCase = prepare_batch("val-batch.pt" )
with torch.no_grad():
_UpperCAmelCase = model(
past_values=batch["past_values"] , past_time_features=batch["past_time_features"] , past_observed_mask=batch["past_observed_mask"] , static_categorical_features=batch["static_categorical_features"] , ).encoder_last_hidden_state
_UpperCAmelCase = torch.Size((6_4, model.config.context_length, model.config.d_model) )
self.assertEqual(output.shape , snake_case_ )
_UpperCAmelCase = torch.tensor(
[[-0.0_7_3_4, -0.9_0_3_6, 0.8_3_5_8], [4.7_1_8_6, 2.4_1_1_3, 1.9_5_8_1], [1.7_9_5_3, 2.3_5_5_8, 1.2_9_7_0]] , device=snake_case_ )
self.assertTrue(torch.allclose(output[0, :3, :3] , snake_case_ , atol=snake_case_ ) )
def lowercase ( self : Tuple ):
_UpperCAmelCase = AutoformerForPrediction.from_pretrained("huggingface/autoformer-tourism-monthly" ).to(snake_case_ )
_UpperCAmelCase = prepare_batch("val-batch.pt" )
with torch.no_grad():
_UpperCAmelCase = model.generate(
static_categorical_features=batch["static_categorical_features"] , past_time_features=batch["past_time_features"] , past_values=batch["past_values"] , future_time_features=batch["future_time_features"] , past_observed_mask=batch["past_observed_mask"] , )
_UpperCAmelCase = torch.Size((6_4, model.config.num_parallel_samples, model.config.prediction_length) )
self.assertEqual(outputs.sequences.shape , snake_case_ )
_UpperCAmelCase = torch.tensor([3_1_3_0.6_7_6_3, 4_0_5_6.5_2_9_3, 7_0_5_3.0_7_8_6] , device=snake_case_ )
_UpperCAmelCase = outputs.sequences.mean(dim=1 )
self.assertTrue(torch.allclose(mean_prediction[0, -3:] , snake_case_ , rtol=1e-1 ) )
| 22 | 0 |
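# Editor's note: one detail worth calling out from the tester above: the encoder
# needs `context_length + max(lags_sequence)` past steps, because each lag shifts
# the observation window further back. Illustrative numbers, not the tester's:
context_length = 10
lags_sequence = [1, 2, 3, 4, 5]
past_length = context_length + max(lags_sequence)  # 15 steps of history required
assert past_length == 15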
import json
import os
from typing import Optional
import numpy as np
from ...feature_extraction_utils import BatchFeature
from ...processing_utils import ProcessorMixin
from ...utils import logging
from ...utils.hub import get_file_from_repo
from ..auto import AutoTokenizer
_a = logging.get_logger(__name__)
class __lowerCamelCase ( snake_case__):
"""simple docstring"""
UpperCamelCase__ = "AutoTokenizer"
UpperCamelCase__ = ["tokenizer"]
UpperCamelCase__ = {
"semantic_prompt": 1,
"coarse_prompt": 2,
"fine_prompt": 2,
}
def __init__( self , UpperCAmelCase , UpperCAmelCase=None ):
"""simple docstring"""
super().__init__(UpperCAmelCase )
_UpperCAmelCase = speaker_embeddings
@classmethod
def UpperCamelCase ( cls , UpperCAmelCase , UpperCAmelCase="speaker_embeddings_path.json" , **UpperCAmelCase ):
"""simple docstring"""
if speaker_embeddings_dict_path is not None:
_UpperCAmelCase = get_file_from_repo(
UpperCAmelCase , UpperCAmelCase , subfolder=kwargs.pop('subfolder' , UpperCAmelCase ) , cache_dir=kwargs.pop('cache_dir' , UpperCAmelCase ) , force_download=kwargs.pop('force_download' , UpperCAmelCase ) , proxies=kwargs.pop('proxies' , UpperCAmelCase ) , resume_download=kwargs.pop('resume_download' , UpperCAmelCase ) , local_files_only=kwargs.pop('local_files_only' , UpperCAmelCase ) , use_auth_token=kwargs.pop('use_auth_token' , UpperCAmelCase ) , revision=kwargs.pop('revision' , UpperCAmelCase ) , )
if speaker_embeddings_path is None:
logger.warning(
F"""`{os.path.join(UpperCAmelCase , UpperCAmelCase )}` does not exists
, no preloaded speaker embeddings will be used - Make sure to provide a correct path to the json
dictionnary if wanted, otherwise set `speaker_embeddings_dict_path=None`.""" )
_UpperCAmelCase = None
else:
with open(UpperCAmelCase ) as speaker_embeddings_json:
_UpperCAmelCase = json.load(UpperCAmelCase )
else:
_UpperCAmelCase = None
_UpperCAmelCase = AutoTokenizer.from_pretrained(UpperCAmelCase , **UpperCAmelCase )
return cls(tokenizer=UpperCAmelCase , speaker_embeddings=UpperCAmelCase )
def UpperCamelCase ( self , UpperCAmelCase , UpperCAmelCase="speaker_embeddings_path.json" , UpperCAmelCase="speaker_embeddings" , UpperCAmelCase = False , **UpperCAmelCase , ):
"""simple docstring"""
if self.speaker_embeddings is not None:
os.makedirs(os.path.join(UpperCAmelCase , UpperCAmelCase , 'v2' ) , exist_ok=UpperCAmelCase )
_UpperCAmelCase = {}
_UpperCAmelCase = save_directory
for prompt_key in self.speaker_embeddings:
if prompt_key != "repo_or_path":
_UpperCAmelCase = self._load_voice_preset(UpperCAmelCase )
_UpperCAmelCase = {}
for key in self.speaker_embeddings[prompt_key]:
np.save(
os.path.join(
embeddings_dict['repo_or_path'] , UpperCAmelCase , F"""{prompt_key}_{key}""" ) , voice_preset[key] , allow_pickle=UpperCAmelCase , )
_UpperCAmelCase = os.path.join(UpperCAmelCase , F"""{prompt_key}_{key}.npy""" )
_UpperCAmelCase = tmp_dict
with open(os.path.join(UpperCAmelCase , UpperCAmelCase ) , 'w' ) as fp:
json.dump(UpperCAmelCase , UpperCAmelCase )
super().save_pretrained(UpperCAmelCase , UpperCAmelCase , **UpperCAmelCase )
def UpperCamelCase ( self , UpperCAmelCase = None , **UpperCAmelCase ):
"""simple docstring"""
_UpperCAmelCase = self.speaker_embeddings[voice_preset]
_UpperCAmelCase = {}
for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]:
if key not in voice_preset_paths:
raise ValueError(
F"""Voice preset unrecognized, missing {key} as a key in self.speaker_embeddings[{voice_preset}].""" )
_UpperCAmelCase = get_file_from_repo(
self.speaker_embeddings.get('repo_or_path' , '/' ) , voice_preset_paths[key] , subfolder=kwargs.pop('subfolder' , UpperCAmelCase ) , cache_dir=kwargs.pop('cache_dir' , UpperCAmelCase ) , force_download=kwargs.pop('force_download' , UpperCAmelCase ) , proxies=kwargs.pop('proxies' , UpperCAmelCase ) , resume_download=kwargs.pop('resume_download' , UpperCAmelCase ) , local_files_only=kwargs.pop('local_files_only' , UpperCAmelCase ) , use_auth_token=kwargs.pop('use_auth_token' , UpperCAmelCase ) , revision=kwargs.pop('revision' , UpperCAmelCase ) , )
if path is None:
raise ValueError(
F"""`{os.path.join(self.speaker_embeddings.get("repo_or_path" , "/" ) , voice_preset_paths[key] )}` does not exists
, no preloaded voice preset will be used - Make sure to provide correct paths to the {voice_preset}
embeddings.""" )
_UpperCAmelCase = np.load(UpperCAmelCase )
return voice_preset_dict
def UpperCamelCase ( self , UpperCAmelCase = None ):
"""simple docstring"""
for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]:
if key not in voice_preset:
raise ValueError(F"""Voice preset unrecognized, missing {key} as a key.""" )
if not isinstance(voice_preset[key] , np.ndarray ):
raise ValueError(F"""{key} voice preset must be a {str(self.preset_shape[key] )}D ndarray.""" )
if len(voice_preset[key].shape ) != self.preset_shape[key]:
raise ValueError(F"""{key} voice preset must be a {str(self.preset_shape[key] )}D ndarray.""" )
def __call__( self , UpperCAmelCase=None , UpperCAmelCase=None , UpperCAmelCase="pt" , UpperCAmelCase=256 , UpperCAmelCase=False , UpperCAmelCase=True , UpperCAmelCase=False , **UpperCAmelCase , ):
"""simple docstring"""
if voice_preset is not None and not isinstance(UpperCAmelCase , UpperCAmelCase ):
if (
isinstance(UpperCAmelCase , UpperCAmelCase )
and self.speaker_embeddings is not None
and voice_preset in self.speaker_embeddings
):
_UpperCAmelCase = self._load_voice_preset(UpperCAmelCase )
else:
if isinstance(UpperCAmelCase , UpperCAmelCase ) and not voice_preset.endswith('.npz' ):
_UpperCAmelCase = voice_preset + '.npz'
_UpperCAmelCase = np.load(UpperCAmelCase )
if voice_preset is not None:
self._validate_voice_preset_dict(UpperCAmelCase , **UpperCAmelCase )
_UpperCAmelCase = BatchFeature(data=UpperCAmelCase , tensor_type=UpperCAmelCase )
_UpperCAmelCase = self.tokenizer(
UpperCAmelCase , return_tensors=UpperCAmelCase , padding='max_length' , max_length=UpperCAmelCase , return_attention_mask=UpperCAmelCase , return_token_type_ids=UpperCAmelCase , add_special_tokens=UpperCAmelCase , **UpperCAmelCase , )
if voice_preset is not None:
_UpperCAmelCase = voice_preset
return encoded_text
| 39 |
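# Editor's note: a hedged usage sketch of the processor above (assumes Hub
# access; "v2/en_speaker_6" is one of Bark's published speaker presets).
from transformers import BarkProcessor

processor = BarkProcessor.from_pretrained("suno/bark-small")
inputs = processor("Hello, my dog is cute", voice_preset="v2/en_speaker_6")
# inputs holds the tokenized text plus semantic/coarse/fine history prompts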
'''simple docstring'''
from .integrations import (
is_optuna_available,
is_ray_available,
is_sigopt_available,
is_wandb_available,
run_hp_search_optuna,
run_hp_search_ray,
run_hp_search_sigopt,
run_hp_search_wandb,
)
from .trainer_utils import (
HPSearchBackend,
default_hp_space_optuna,
default_hp_space_ray,
default_hp_space_sigopt,
default_hp_space_wandb,
)
from .utils import logging
__SCREAMING_SNAKE_CASE :int = logging.get_logger(__name__)
class A_ :
_lowerCamelCase : str
_lowerCamelCase : str = None
@staticmethod
def lowercase ( ):
raise NotImplementedError
def lowercase ( self : Union[str, Any] , snake_case_ : Optional[int] , snake_case_ : int , snake_case_ : str , **snake_case_ : List[Any] ):
raise NotImplementedError
def lowercase ( self : Any , snake_case_ : int ):
raise NotImplementedError
def lowercase ( self : List[str] ):
if not self.is_available():
raise RuntimeError(
f'You picked the {self.name} backend, but it is not installed. Run {self.pip_install()}.' )
@classmethod
def lowercase ( cls : List[Any] ):
return f'`pip install {cls.pip_package or cls.name}`'
class A_ ( lowerCAmelCase_ ):
_lowerCamelCase : int = """optuna"""
@staticmethod
def lowercase ( ):
return is_optuna_available()
def lowercase ( self : List[str] , snake_case_ : Any , snake_case_ : int , snake_case_ : str , **snake_case_ : Tuple ):
return run_hp_search_optuna(snake_case_ , snake_case_ , snake_case_ , **snake_case_ )
def lowercase ( self : int , snake_case_ : Optional[int] ):
return default_hp_space_optuna(snake_case_ )
class A_ ( lowerCAmelCase_ ):
_lowerCamelCase : Any = """ray"""
_lowerCamelCase : Tuple = """'ray[tune]'"""
@staticmethod
def lowercase ( ):
return is_ray_available()
def lowercase ( self : Optional[Any] , snake_case_ : Any , snake_case_ : int , snake_case_ : str , **snake_case_ : List[str] ):
return run_hp_search_ray(snake_case_ , snake_case_ , snake_case_ , **snake_case_ )
def lowercase ( self : Any , snake_case_ : str ):
return default_hp_space_ray(snake_case_ )
class A_ ( lowerCAmelCase_ ):
_lowerCamelCase : int = """sigopt"""
@staticmethod
def lowercase ( ):
return is_sigopt_available()
def lowercase ( self : Any , snake_case_ : int , snake_case_ : int , snake_case_ : str , **snake_case_ : Dict ):
return run_hp_search_sigopt(snake_case_ , snake_case_ , snake_case_ , **snake_case_ )
def lowercase ( self : Dict , snake_case_ : Optional[Any] ):
return default_hp_space_sigopt(snake_case_ )
class A_ ( lowerCAmelCase_ ):
_lowerCamelCase : Optional[int] = """wandb"""
@staticmethod
def lowercase ( ):
return is_wandb_available()
def lowercase ( self : Optional[Any] , snake_case_ : Optional[Any] , snake_case_ : int , snake_case_ : str , **snake_case_ : Optional[Any] ):
return run_hp_search_wandb(snake_case_ , snake_case_ , snake_case_ , **snake_case_ )
def lowercase ( self : Any , snake_case_ : Union[str, Any] ):
return default_hp_space_wandb(snake_case_ )
__SCREAMING_SNAKE_CASE :Dict = {
HPSearchBackend(backend.name): backend for backend in [OptunaBackend, RayTuneBackend, SigOptBackend, WandbBackend]
}
def UpperCAmelCase_ ( ) -> str:
'''simple docstring'''
_UpperCAmelCase = [backend for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values() if backend.is_available()]
if len(__lowercase ) > 0:
_UpperCAmelCase = available_backends[0].name
if len(__lowercase ) > 1:
logger.info(
f'{len(__lowercase )} hyperparameter search backends available. Using {name} as the default.' )
return name
raise RuntimeError(
"No hyperparameter search backend available.\n"
+ "\n".join(
f' - To install {backend.name} run {backend.pip_install()}'
for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values() ) )
| 22 | 0 |
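# Editor's note: in practice these backends are reached through
# `Trainer.hyperparameter_search`. A hedged sketch with the Optuna backend;
# it assumes `optuna` is installed and that `trainer` (built with a
# `model_init`) is defined elsewhere.
def optuna_hp_space(trial):
    return {
        "learning_rate": trial.suggest_float("learning_rate", 1e-6, 1e-4, log=True),
        "per_device_train_batch_size": trial.suggest_categorical(
            "per_device_train_batch_size", [16, 32, 64]
        ),
    }

best_run = trainer.hyperparameter_search(
    direction="minimize",
    backend="optuna",
    hp_space=optuna_hp_space,
    n_trials=20,
)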
"""simple docstring"""
import argparse
import torch
from transformers import BertForMaskedLM
if __name__ == "__main__":
__lowercase = argparse.ArgumentParser(
description=(
"""Extraction some layers of the full BertForMaskedLM or RObertaForMaskedLM for Transfer Learned"""
""" Distillation"""
)
)
parser.add_argument("""--model_type""", default="""bert""", choices=["""bert"""])
parser.add_argument("""--model_name""", default="""bert-base-uncased""", type=str)
parser.add_argument("""--dump_checkpoint""", default="""serialization_dir/tf_bert-base-uncased_0247911.pth""", type=str)
parser.add_argument("""--vocab_transform""", action="""store_true""")
__lowercase = parser.parse_args()
if args.model_type == "bert":
__lowercase = BertForMaskedLM.from_pretrained(args.model_name)
__lowercase = """bert"""
else:
raise ValueError("""args.model_type should be \"bert\".""")
__lowercase = model.state_dict()
__lowercase = {}
for w in ["word_embeddings", "position_embeddings"]:
__lowercase = state_dict[f'''{prefix}.embeddings.{w}.weight''']
for w in ["weight", "bias"]:
__lowercase = state_dict[f'''{prefix}.embeddings.LayerNorm.{w}''']
__lowercase = 0
for teacher_idx in [0, 2, 4, 7, 9, 11]:
for w in ["weight", "bias"]:
__lowercase = state_dict[
f'''{prefix}.encoder.layer.{teacher_idx}.attention.self.query.{w}'''
]
__lowercase = state_dict[
f'''{prefix}.encoder.layer.{teacher_idx}.attention.self.key.{w}'''
]
__lowercase = state_dict[
f'''{prefix}.encoder.layer.{teacher_idx}.attention.self.value.{w}'''
]
__lowercase = state_dict[
f'''{prefix}.encoder.layer.{teacher_idx}.attention.output.dense.{w}'''
]
__lowercase = state_dict[
f'''{prefix}.encoder.layer.{teacher_idx}.attention.output.LayerNorm.{w}'''
]
__lowercase = state_dict[
f'''{prefix}.encoder.layer.{teacher_idx}.intermediate.dense.{w}'''
]
__lowercase = state_dict[
f'''{prefix}.encoder.layer.{teacher_idx}.output.dense.{w}'''
]
__lowercase = state_dict[
f'''{prefix}.encoder.layer.{teacher_idx}.output.LayerNorm.{w}'''
]
std_idx += 1
__lowercase = state_dict["""cls.predictions.decoder.weight"""]
__lowercase = state_dict["""cls.predictions.bias"""]
if args.vocab_transform:
for w in ["weight", "bias"]:
__lowercase = state_dict[f'''cls.predictions.transform.dense.{w}''']
__lowercase = state_dict[f'''cls.predictions.transform.LayerNorm.{w}''']
print(f'''N layers selected for distillation: {std_idx}''')
print(f'''Number of params transferred for distillation: {len(compressed_sd.keys())}''')
print(f'''Save transferred checkpoint to {args.dump_checkpoint}.''')
torch.save(compressed_sd, args.dump_checkpoint)
| 40 |
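# Editor's note: the script above keeps teacher layers [0, 2, 4, 7, 9, 11] and
# renumbers them 0..5 in the student. A minimal sketch of that key-rewriting
# idea on a toy state dict (integer stand-ins for tensors):
teacher_layers = [0, 2, 4, 7, 9, 11]
teacher_sd = {f"encoder.layer.{i}.weight": i for i in range(12)}

student_sd = {
    f"encoder.layer.{std_idx}.weight": teacher_sd[f"encoder.layer.{t_idx}.weight"]
    for std_idx, t_idx in enumerate(teacher_layers)
}
assert list(student_sd) == [f"encoder.layer.{i}.weight" for i in range(6)]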
'''simple docstring'''
__SCREAMING_SNAKE_CASE :List[str] = '''0.18.2'''
from .configuration_utils import ConfigMixin
from .utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_inflect_available,
is_invisible_watermark_available,
is_k_diffusion_available,
is_k_diffusion_version,
is_librosa_available,
is_note_seq_available,
is_onnx_available,
is_scipy_available,
is_torch_available,
is_torchsde_available,
is_transformers_available,
is_transformers_version,
is_unidecode_available,
logging,
)
try:
if not is_onnx_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_onnx_objects import * # noqa F403
else:
from .pipelines import OnnxRuntimeModel
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_pt_objects import * # noqa F403
else:
from .models import (
AutoencoderKL,
ControlNetModel,
ModelMixin,
PriorTransformer,
TaFilmDecoder,
TransformeraDModel,
UNetaDModel,
UNetaDConditionModel,
UNetaDModel,
UNetaDConditionModel,
VQModel,
)
from .optimization import (
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
get_scheduler,
)
from .pipelines import (
AudioPipelineOutput,
ConsistencyModelPipeline,
DanceDiffusionPipeline,
DDIMPipeline,
DDPMPipeline,
DiffusionPipeline,
DiTPipeline,
ImagePipelineOutput,
KarrasVePipeline,
LDMPipeline,
LDMSuperResolutionPipeline,
PNDMPipeline,
RePaintPipeline,
ScoreSdeVePipeline,
)
from .schedulers import (
CMStochasticIterativeScheduler,
DDIMInverseScheduler,
DDIMParallelScheduler,
DDIMScheduler,
DDPMParallelScheduler,
DDPMScheduler,
DEISMultistepScheduler,
DPMSolverMultistepInverseScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
HeunDiscreteScheduler,
IPNDMScheduler,
KarrasVeScheduler,
KDPMaAncestralDiscreteScheduler,
KDPMaDiscreteScheduler,
PNDMScheduler,
RePaintScheduler,
SchedulerMixin,
ScoreSdeVeScheduler,
UnCLIPScheduler,
UniPCMultistepScheduler,
VQDiffusionScheduler,
)
from .training_utils import EMAModel
try:
if not (is_torch_available() and is_scipy_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_scipy_objects import * # noqa F403
else:
from .schedulers import LMSDiscreteScheduler
try:
if not (is_torch_available() and is_torchsde_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_torchsde_objects import * # noqa F403
else:
from .schedulers import DPMSolverSDEScheduler
try:
if not (is_torch_available() and is_transformers_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipelines import (
AltDiffusionImgaImgPipeline,
AltDiffusionPipeline,
AudioLDMPipeline,
CycleDiffusionPipeline,
IFImgaImgPipeline,
IFImgaImgSuperResolutionPipeline,
IFInpaintingPipeline,
IFInpaintingSuperResolutionPipeline,
IFPipeline,
IFSuperResolutionPipeline,
ImageTextPipelineOutput,
KandinskyImgaImgPipeline,
KandinskyInpaintPipeline,
KandinskyPipeline,
KandinskyPriorPipeline,
KandinskyVaaControlnetImgaImgPipeline,
KandinskyVaaControlnetPipeline,
KandinskyVaaImgaImgPipeline,
KandinskyVaaInpaintPipeline,
KandinskyVaaPipeline,
KandinskyVaaPriorEmbaEmbPipeline,
KandinskyVaaPriorPipeline,
LDMTextToImagePipeline,
PaintByExamplePipeline,
SemanticStableDiffusionPipeline,
ShapEImgaImgPipeline,
ShapEPipeline,
StableDiffusionAttendAndExcitePipeline,
StableDiffusionControlNetImgaImgPipeline,
StableDiffusionControlNetInpaintPipeline,
StableDiffusionControlNetPipeline,
StableDiffusionDepthaImgPipeline,
StableDiffusionDiffEditPipeline,
StableDiffusionImageVariationPipeline,
StableDiffusionImgaImgPipeline,
StableDiffusionInpaintPipeline,
StableDiffusionInpaintPipelineLegacy,
StableDiffusionInstructPixaPixPipeline,
StableDiffusionLatentUpscalePipeline,
StableDiffusionLDMaDPipeline,
StableDiffusionModelEditingPipeline,
StableDiffusionPanoramaPipeline,
StableDiffusionParadigmsPipeline,
StableDiffusionPipeline,
StableDiffusionPipelineSafe,
StableDiffusionPixaPixZeroPipeline,
StableDiffusionSAGPipeline,
StableDiffusionUpscalePipeline,
StableUnCLIPImgaImgPipeline,
StableUnCLIPPipeline,
TextToVideoSDPipeline,
TextToVideoZeroPipeline,
UnCLIPImageVariationPipeline,
UnCLIPPipeline,
UniDiffuserModel,
UniDiffuserPipeline,
UniDiffuserTextDecoder,
VersatileDiffusionDualGuidedPipeline,
VersatileDiffusionImageVariationPipeline,
VersatileDiffusionPipeline,
VersatileDiffusionTextToImagePipeline,
VideoToVideoSDPipeline,
VQDiffusionPipeline,
)
try:
if not (is_torch_available() and is_transformers_available() and is_invisible_watermark_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_invisible_watermark_objects import * # noqa F403
else:
from .pipelines import StableDiffusionXLImgaImgPipeline, StableDiffusionXLPipeline
try:
if not (is_torch_available() and is_transformers_available() and is_k_diffusion_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_k_diffusion_objects import * # noqa F403
else:
from .pipelines import StableDiffusionKDiffusionPipeline
try:
if not (is_torch_available() and is_transformers_available() and is_onnx_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_onnx_objects import * # noqa F403
else:
from .pipelines import (
OnnxStableDiffusionImgaImgPipeline,
OnnxStableDiffusionInpaintPipeline,
OnnxStableDiffusionInpaintPipelineLegacy,
OnnxStableDiffusionPipeline,
OnnxStableDiffusionUpscalePipeline,
StableDiffusionOnnxPipeline,
)
try:
if not (is_torch_available() and is_librosa_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_librosa_objects import * # noqa F403
else:
from .pipelines import AudioDiffusionPipeline, Mel
try:
if not (is_transformers_available() and is_torch_available() and is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_transformers_and_torch_and_note_seq_objects import * # noqa F403
else:
from .pipelines import SpectrogramDiffusionPipeline
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_flax_objects import * # noqa F403
else:
from .models.controlnet_flax import FlaxControlNetModel
from .models.modeling_flax_utils import FlaxModelMixin
from .models.unet_ad_condition_flax import FlaxUNetaDConditionModel
from .models.vae_flax import FlaxAutoencoderKL
from .pipelines import FlaxDiffusionPipeline
from .schedulers import (
FlaxDDIMScheduler,
FlaxDDPMScheduler,
FlaxDPMSolverMultistepScheduler,
FlaxKarrasVeScheduler,
FlaxLMSDiscreteScheduler,
FlaxPNDMScheduler,
FlaxSchedulerMixin,
FlaxScoreSdeVeScheduler,
)
try:
if not (is_flax_available() and is_transformers_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_flax_and_transformers_objects import * # noqa F403
else:
from .pipelines import (
FlaxStableDiffusionControlNetPipeline,
FlaxStableDiffusionImgaImgPipeline,
FlaxStableDiffusionInpaintPipeline,
FlaxStableDiffusionPipeline,
)
try:
if not (is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_note_seq_objects import * # noqa F403
else:
from .pipelines import MidiProcessor
| 22 | 0 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_A : Any =logging.get_logger(__name__)
_A : List[str] ={
'''edbeeching/decision-transformer-gym-hopper-medium''': (
'''https://huggingface.co/edbeeching/decision-transformer-gym-hopper-medium/resolve/main/config.json'''
),
# See all DecisionTransformer models at https://huggingface.co/models?filter=decision_transformer
}
class _lowercase ( PretrainedConfig ):
    model_type = """decision_transformer"""
    keys_to_ignore_at_inference = ["""past_key_values"""]
    attribute_map = {
        """max_position_embeddings""": """n_positions""",
        """num_attention_heads""": """n_head""",
        """num_hidden_layers""": """n_layer""",
    }
    def __init__( self , state_dim=17 , act_dim=4 , hidden_size=128 , max_ep_len=4_096 , action_tanh=True , vocab_size=1 , n_positions=1_024 , n_layer=3 , n_head=1 , n_inner=None , activation_function="relu" , resid_pdrop=0.1 , embd_pdrop=0.1 , attn_pdrop=0.1 , layer_norm_epsilon=1e-5 , initializer_range=0.02 , scale_attn_weights=True , use_cache=True , bos_token_id=50_256 , eos_token_id=50_256 , scale_attn_by_inverse_layer_idx=False , reorder_and_upcast_attn=False , **kwargs , ):
        self.state_dim = state_dim
        self.act_dim = act_dim
        self.hidden_size = hidden_size
        self.max_ep_len = max_ep_len
        self.action_tanh = action_tanh
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.scale_attn_weights = scale_attn_weights
        self.use_cache = use_cache
        self.scale_attn_by_inverse_layer_idx = scale_attn_by_inverse_layer_idx
        self.reorder_and_upcast_attn = reorder_and_upcast_attn
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        super().__init__(bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
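# Usage sketch (an added illustration, not part of the original file; the hopper-style
# dimensions below are assumptions):
#
#     config = _lowercase(state_dim=11, act_dim=3, max_ep_len=1_000)
#     assert config.model_type == "decision_transformer"
#     assert config.n_positions == 1_024  # exposed as max_position_embeddings via attribute_map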
| 41 |
'''simple docstring'''
import re
from filelock import FileLock
try:
import nltk
__SCREAMING_SNAKE_CASE :Optional[int] = True
except (ImportError, ModuleNotFoundError):
__SCREAMING_SNAKE_CASE :str = False
if NLTK_AVAILABLE:
with FileLock('''.lock''') as lock:
nltk.download('''punkt''', quiet=True)
def UpperCAmelCase_ ( __lowercase : str ) -> str:
'''simple docstring'''
re.sub("<n>" , "" , __lowercase ) # remove pegasus newline char
assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)"
return "\n".join(nltk.sent_tokenize(__lowercase ) )
| 22 | 0 |
'''simple docstring'''
import random
import unittest
import numpy as np
from diffusers import (
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
    OnnxStableDiffusionImg2ImgPipeline,
PNDMScheduler,
)
from diffusers.utils import floats_tensor
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class __UpperCAmelCase ( OnnxPipelineTesterMixin , unittest.TestCase ):
    hub_checkpoint = """hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline"""
    def get_dummy_inputs ( self , lowerCAmelCase_=0 ):
"""simple docstring"""
        image = floats_tensor((1, 3, 1_28, 1_28) , rng=random.Random(lowerCAmelCase_ ) )
        generator = np.random.RandomState(lowerCAmelCase_ )
        inputs = {
'prompt': 'A painting of a squirrel eating a burger',
'image': image,
'generator': generator,
'num_inference_steps': 3,
'strength': 0.75,
'guidance_scale': 7.5,
'output_type': 'numpy',
}
return inputs
    def test_pipeline_default_ddim ( self ):
        """simple docstring"""
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint , provider='CPUExecutionProvider' )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs ).images
        image_slice = image[0, -3:, -3:, -1].flatten()
        assert image.shape == (1, 1_28, 1_28, 3)
        expected_slice = np.array([0.69643, 0.58484, 0.50314, 0.58760, 0.55368, 0.59643, 0.51529, 0.41217, 0.49087] )
        assert np.abs(image_slice - expected_slice ).max() < 1E-1
    def test_pipeline_pndm ( self ):
        """simple docstring"""
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint , provider='CPUExecutionProvider' )
        pipe.scheduler = PNDMScheduler.from_config(pipe.scheduler.config , skip_prk_steps=True )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs ).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 1_28, 1_28, 3)
        expected_slice = np.array([0.61737, 0.54642, 0.53183, 0.54465, 0.52742, 0.60525, 0.49969, 0.40655, 0.48154] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
    def test_pipeline_lms ( self ):
        """simple docstring"""
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint , provider='CPUExecutionProvider' )
        pipe.scheduler = LMSDiscreteScheduler.from_config(pipe.scheduler.config )
        pipe.set_progress_bar_config(disable=None )
        # warmup pass to apply optimizations
        _ = pipe(**self.get_dummy_inputs() )
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs ).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 1_28, 1_28, 3)
        expected_slice = np.array([0.52761, 0.59977, 0.49033, 0.49619, 0.54282, 0.50311, 0.47600, 0.40918, 0.45203] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
    def test_pipeline_euler ( self ):
        """simple docstring"""
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint , provider='CPUExecutionProvider' )
        pipe.scheduler = EulerDiscreteScheduler.from_config(pipe.scheduler.config )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs ).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 1_28, 1_28, 3)
        expected_slice = np.array([0.52911, 0.60004, 0.49229, 0.49805, 0.54502, 0.50680, 0.47777, 0.41028, 0.45304] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
    def test_pipeline_euler_ancestral ( self ):
        """simple docstring"""
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint , provider='CPUExecutionProvider' )
        pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs ).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 1_28, 1_28, 3)
        expected_slice = np.array([0.52911, 0.60004, 0.49229, 0.49805, 0.54502, 0.50680, 0.47777, 0.41028, 0.45304] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
    def test_pipeline_dpm_multistep ( self ):
        """simple docstring"""
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint , provider='CPUExecutionProvider' )
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs ).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 1_28, 1_28, 3)
        expected_slice = np.array([0.65331, 0.58277, 0.48204, 0.56059, 0.53665, 0.56235, 0.50969, 0.40009, 0.46552] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
@nightly
@require_onnxruntime
@require_torch_gpu
class OnnxStableDiffusionImg2ImgPipelineIntegrationTests ( unittest.TestCase ):
@property
    def gpu_provider ( self ):
"""simple docstring"""
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
    def gpu_options ( self ):
        """simple docstring"""
        options = ort.SessionOptions()
        options.enable_mem_pattern = False
        return options
    def test_inference_default_pndm ( self ):
        """simple docstring"""
        init_image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
            '/img2img/sketch-mountains-input.jpg' )
        init_image = init_image.resize((7_68, 5_12) )
        # using the PNDM scheduler by default
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(
            'CompVis/stable-diffusion-v1-4' , revision='onnx' , safety_checker=None , feature_extractor=None , provider=self.gpu_provider , sess_options=self.gpu_options , )
        pipe.set_progress_bar_config(disable=None )
        prompt = 'A fantasy landscape, trending on artstation'
        generator = np.random.RandomState(0 )
        output = pipe(
            prompt=prompt , image=init_image , strength=0.75 , guidance_scale=7.5 , num_inference_steps=10 , generator=generator , output_type='np' , )
        images = output.images
        image_slice = images[0, 2_55:2_58, 3_83:3_86, -1]
        assert images.shape == (1, 5_12, 7_68, 3)
        expected_slice = np.array([0.4909, 0.5059, 0.5372, 0.4623, 0.4876, 0.5049, 0.4820, 0.4956, 0.5019] )
        # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 2E-2
    def test_inference_k_lms ( self ):
        """simple docstring"""
        init_image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
            '/img2img/sketch-mountains-input.jpg' )
        init_image = init_image.resize((7_68, 5_12) )
        lms_scheduler = LMSDiscreteScheduler.from_pretrained(
            'runwayml/stable-diffusion-v1-5' , subfolder='scheduler' , revision='onnx' )
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(
            'runwayml/stable-diffusion-v1-5' , revision='onnx' , scheduler=lms_scheduler , safety_checker=None , feature_extractor=None , provider=self.gpu_provider , sess_options=self.gpu_options , )
        pipe.set_progress_bar_config(disable=None )
        prompt = 'A fantasy landscape, trending on artstation'
        generator = np.random.RandomState(0 )
        output = pipe(
            prompt=prompt , image=init_image , strength=0.75 , guidance_scale=7.5 , num_inference_steps=20 , generator=generator , output_type='np' , )
        images = output.images
        image_slice = images[0, 2_55:2_58, 3_83:3_86, -1]
        assert images.shape == (1, 5_12, 7_68, 3)
        expected_slice = np.array([0.8043, 0.926, 0.9581, 0.8119, 0.8954, 0.913, 0.7209, 0.7463, 0.7431] )
        # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 2E-2
| 42 |
'''simple docstring'''
import inspect
import unittest
import numpy as np
from tests.test_modeling_common import floats_tensor
from transformers import DetrConfig, MaskFormerConfig, SwinConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaskFormerForInstanceSegmentation, MaskFormerModel
if is_vision_available():
from transformers import MaskFormerImageProcessor
if is_vision_available():
from PIL import Image
class MaskFormerModelTester :
    def __init__( self , parent , batch_size=2 , is_training=True , use_auxiliary_loss=False , num_queries=1_0 , num_channels=3 , min_size=3_2 * 4 , max_size=3_2 * 6 , num_labels=4 , mask_feature_size=3_2 , ):
        self.parent = parent
        self.batch_size = batch_size
        self.is_training = is_training
        self.use_auxiliary_loss = use_auxiliary_loss
        self.num_queries = num_queries
        self.num_channels = num_channels
        self.min_size = min_size
        self.max_size = max_size
        self.num_labels = num_labels
        self.mask_feature_size = mask_feature_size
    def prepare_config_and_inputs ( self ):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size] ).to(
            torch_device )
        pixel_mask = torch.ones([self.batch_size, self.min_size, self.max_size] , device=torch_device )
        mask_labels = (
            torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size] , device=torch_device ) > 0.5
        ).float()
        class_labels = (torch.rand((self.batch_size, self.num_labels) , device=torch_device ) > 0.5).long()
        config = self.get_config()
        return config, pixel_values, pixel_mask, mask_labels, class_labels
    def get_config ( self ):
return MaskFormerConfig.from_backbone_and_decoder_configs(
backbone_config=SwinConfig(
depths=[1, 1, 1, 1] , ) , decoder_config=DetrConfig(
decoder_ffn_dim=1_2_8 , num_queries=self.num_queries , decoder_attention_heads=2 , d_model=self.mask_feature_size , ) , mask_feature_size=self.mask_feature_size , fpn_feature_size=self.mask_feature_size , num_channels=self.num_channels , num_labels=self.num_labels , )
    def prepare_config_and_inputs_for_common ( self ):
        config, pixel_values, pixel_mask, mask_labels, class_labels = self.prepare_config_and_inputs()
        inputs_dict = {"pixel_values": pixel_values, "pixel_mask": pixel_mask}
        return config, inputs_dict
    def check_output_hidden_state ( self , output , config ):
        encoder_hidden_states = output.encoder_hidden_states
        pixel_decoder_hidden_states = output.pixel_decoder_hidden_states
        transformer_decoder_hidden_states = output.transformer_decoder_hidden_states
        self.parent.assertTrue(len(encoder_hidden_states ) , len(config.backbone_config.depths ) )
        self.parent.assertTrue(len(pixel_decoder_hidden_states ) , len(config.backbone_config.depths ) )
        self.parent.assertTrue(len(transformer_decoder_hidden_states ) , config.decoder_config.decoder_layers )
    def create_and_check_maskformer_model ( self , config , pixel_values , pixel_mask , output_hidden_states=False ):
        with torch.no_grad():
            model = MaskFormerModel(config=config )
            model.to(torch_device )
            model.eval()
            output = model(pixel_values=pixel_values , pixel_mask=pixel_mask )
            output = model(pixel_values , output_hidden_states=True )
        # the correct shape of output.transformer_decoder_hidden_states ensure the correcteness of the
        # encoder and pixel decoder
        self.parent.assertEqual(
            output.transformer_decoder_last_hidden_state.shape , (self.batch_size, self.num_queries, self.mask_feature_size) , )
        # let's ensure the other two hidden state exists
        self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None )
        self.parent.assertTrue(output.encoder_last_hidden_state is not None )
        if output_hidden_states:
            self.check_output_hidden_state(output , config )
    def create_and_check_maskformer_instance_segmentation_head_model ( self , config , pixel_values , pixel_mask , mask_labels , class_labels ):
        model = MaskFormerForInstanceSegmentation(config=config )
        model.to(torch_device )
        model.eval()
        def comm_check_on_output(result ):
            # let's still check that all the required stuff is there
            self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None )
            self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None )
            self.parent.assertTrue(result.encoder_last_hidden_state is not None )
            # okay, now we need to check the logits shape
            # due to the encoder compression, masks have a //4 spatial size
            self.parent.assertEqual(
                result.masks_queries_logits.shape , (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) , )
            # + 1 for null class
            self.parent.assertEqual(
                result.class_queries_logits.shape , (self.batch_size, self.num_queries, self.num_labels + 1) )
        with torch.no_grad():
            result = model(pixel_values=pixel_values , pixel_mask=pixel_mask )
            result = model(pixel_values )
            comm_check_on_output(result )
            result = model(
                pixel_values=pixel_values , pixel_mask=pixel_mask , mask_labels=mask_labels , class_labels=class_labels )
            comm_check_on_output(result )
            self.parent.assertTrue(result.loss is not None )
            self.parent.assertEqual(result.loss.shape , torch.Size([1] ) )
@require_torch
class MaskFormerModelTest ( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (MaskFormerModel, MaskFormerForInstanceSegmentation) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"""feature-extraction""": MaskFormerModel, """image-segmentation""": MaskFormerForInstanceSegmentation}
        if is_torch_available()
        else {}
    )
    is_encoder_decoder = False
    test_pruning = False
    test_head_masking = False
    test_missing_keys = False
    def setUp ( self ):
        self.model_tester = MaskFormerModelTester(self )
        self.config_tester = ConfigTester(self , config_class=MaskFormerConfig , has_text_modality=False )
def lowercase ( self : Optional[Any] ):
self.config_tester.run_common_tests()
def lowercase ( self : Union[str, Any] ):
_UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskformer_model(snake_case_ , **snake_case_ , output_hidden_states=snake_case_ )
def lowercase ( self : int ):
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_maskformer_instance_segmentation_head_model(*snake_case_ )
@unittest.skip(reason="MaskFormer does not use inputs_embeds" )
def lowercase ( self : Any ):
pass
@unittest.skip(reason="MaskFormer does not have a get_input_embeddings method" )
def lowercase ( self : List[str] ):
pass
@unittest.skip(reason="MaskFormer is not a generative model" )
def lowercase ( self : List[str] ):
pass
@unittest.skip(reason="MaskFormer does not use token embeddings" )
def lowercase ( self : List[Any] ):
pass
@require_torch_multi_gpu
@unittest.skip(
reason="MaskFormer has some layers using `add_module` which doesn't work well with `nn.DataParallel`" )
def lowercase ( self : Any ):
pass
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
def lowercase ( self : Union[str, Any] ):
pass
def lowercase ( self : List[str] ):
_UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCAmelCase = model_class(snake_case_ )
_UpperCAmelCase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_UpperCAmelCase = [*signature.parameters.keys()]
_UpperCAmelCase = ["pixel_values"]
self.assertListEqual(arg_names[:1] , snake_case_ )
@slow
def lowercase ( self : Optional[int] ):
for model_name in ["facebook/maskformer-swin-small-coco"]:
_UpperCAmelCase = MaskFormerModel.from_pretrained(snake_case_ )
self.assertIsNotNone(snake_case_ )
def lowercase ( self : Optional[int] ):
_UpperCAmelCase = (self.model_tester.min_size,) * 2
_UpperCAmelCase = {
"pixel_values": torch.randn((2, 3, *size) , device=snake_case_ ),
"mask_labels": torch.randn((2, 1_0, *size) , device=snake_case_ ),
"class_labels": torch.zeros(2 , 1_0 , device=snake_case_ ).long(),
}
_UpperCAmelCase = MaskFormerForInstanceSegmentation(MaskFormerConfig() ).to(snake_case_ )
_UpperCAmelCase = model(**snake_case_ )
self.assertTrue(outputs.loss is not None )
def lowercase ( self : Dict ):
_UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskformer_model(snake_case_ , **snake_case_ , output_hidden_states=snake_case_ )
def lowercase ( self : Any ):
_UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCAmelCase = model_class(snake_case_ ).to(snake_case_ )
_UpperCAmelCase = model(**snake_case_ , output_attentions=snake_case_ )
self.assertTrue(outputs.attentions is not None )
def lowercase ( self : int ):
if not self.model_tester.is_training:
return
# only MaskFormerForInstanceSegmentation has the loss
_UpperCAmelCase = self.all_model_classes[1]
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
_UpperCAmelCase = model_class(snake_case_ )
model.to(snake_case_ )
model.train()
_UpperCAmelCase = model(snake_case_ , mask_labels=snake_case_ , class_labels=snake_case_ ).loss
loss.backward()
def lowercase ( self : int ):
# only MaskFormerForInstanceSegmentation has the loss
_UpperCAmelCase = self.all_model_classes[1]
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
_UpperCAmelCase = True
_UpperCAmelCase = True
_UpperCAmelCase = model_class(snake_case_ )
model.to(snake_case_ )
model.train()
_UpperCAmelCase = model(snake_case_ , mask_labels=snake_case_ , class_labels=snake_case_ )
_UpperCAmelCase = outputs.encoder_hidden_states[0]
encoder_hidden_states.retain_grad()
_UpperCAmelCase = outputs.pixel_decoder_hidden_states[0]
pixel_decoder_hidden_states.retain_grad()
# we requires_grad=True in inputs_embeds (line 2152), the original implementation don't
_UpperCAmelCase = outputs.transformer_decoder_hidden_states[0]
transformer_decoder_hidden_states.retain_grad()
_UpperCAmelCase = outputs.attentions[0]
attentions.retain_grad()
outputs.loss.backward(retain_graph=snake_case_ )
self.assertIsNotNone(encoder_hidden_states.grad )
self.assertIsNotNone(pixel_decoder_hidden_states.grad )
self.assertIsNotNone(transformer_decoder_hidden_states.grad )
self.assertIsNotNone(attentions.grad )
__SCREAMING_SNAKE_CASE :Dict = 1e-4
def prepare_img ( ) -> List[str]:
    '''simple docstring'''
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
    return image
@require_vision
@slow
class A_ ( unittest.TestCase ):
@cached_property
    def default_image_processor ( self : Dict ):
return (
MaskFormerImageProcessor.from_pretrained("facebook/maskformer-swin-small-coco" )
if is_vision_available()
else None
)
def lowercase ( self : List[Any] ):
_UpperCAmelCase = MaskFormerModel.from_pretrained("facebook/maskformer-swin-small-coco" ).to(snake_case_ )
_UpperCAmelCase = self.default_image_processor
_UpperCAmelCase = prepare_img()
_UpperCAmelCase = image_processor(snake_case_ , return_tensors="pt" ).to(snake_case_ )
_UpperCAmelCase = inputs["pixel_values"].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 3_2) == 0 and (inputs_shape[-2] % 3_2) == 0 )
# check size
self.assertEqual(snake_case_ , (1, 3, 8_0_0, 1_0_8_8) )
with torch.no_grad():
_UpperCAmelCase = model(**snake_case_ )
_UpperCAmelCase = torch.tensor(
[[-0.0_4_8_2, 0.9_2_2_8, 0.4_9_5_1], [-0.2_5_4_7, 0.8_0_1_7, 0.8_5_2_7], [-0.0_0_6_9, 0.3_3_8_5, -0.0_0_8_9]] ).to(snake_case_ )
self.assertTrue(
torch.allclose(
outputs.encoder_last_hidden_state[0, 0, :3, :3] , snake_case_ , atol=snake_case_ ) )
_UpperCAmelCase = torch.tensor(
[[-0.8_4_2_2, -0.8_4_3_4, -0.9_7_1_8], [-1.0_1_4_4, -0.5_5_6_5, -0.4_1_9_5], [-1.0_0_3_8, -0.4_4_8_4, -0.1_9_6_1]] ).to(snake_case_ )
self.assertTrue(
torch.allclose(
outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] , snake_case_ , atol=snake_case_ ) )
_UpperCAmelCase = torch.tensor(
[[0.2_8_5_2, -0.0_1_5_9, 0.9_7_3_5], [0.6_2_5_4, 0.1_8_5_8, 0.8_5_2_9], [-0.0_6_8_0, -0.4_1_1_6, 1.8_4_1_3]] ).to(snake_case_ )
self.assertTrue(
torch.allclose(
outputs.transformer_decoder_last_hidden_state[0, :3, :3] , snake_case_ , atol=snake_case_ ) )
def lowercase ( self : Tuple ):
_UpperCAmelCase = (
MaskFormerForInstanceSegmentation.from_pretrained("facebook/maskformer-swin-small-coco" )
.to(snake_case_ )
.eval()
)
_UpperCAmelCase = self.default_image_processor
_UpperCAmelCase = prepare_img()
_UpperCAmelCase = image_processor(snake_case_ , return_tensors="pt" ).to(snake_case_ )
_UpperCAmelCase = inputs["pixel_values"].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 3_2) == 0 and (inputs_shape[-2] % 3_2) == 0 )
# check size
self.assertEqual(snake_case_ , (1, 3, 8_0_0, 1_0_8_8) )
with torch.no_grad():
_UpperCAmelCase = model(**snake_case_ )
# masks_queries_logits
_UpperCAmelCase = outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape , (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , )
_UpperCAmelCase = [
[-1.3_7_3_7_1_2_4, -1.7_7_2_4_9_3_7, -1.9_3_6_4_2_3_3],
[-1.5_9_7_7_2_8_1, -1.9_8_6_7_9_3_9, -2.1_5_2_3_6_9_5],
[-1.5_7_9_5_3_9_8, -1.9_2_6_9_8_3_2, -2.0_9_3_9_4_2],
]
_UpperCAmelCase = torch.tensor(snake_case_ ).to(snake_case_ )
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , snake_case_ , atol=snake_case_ ) )
# class_queries_logits
_UpperCAmelCase = outputs.class_queries_logits
self.assertEqual(
class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1) )
_UpperCAmelCase = torch.tensor(
[
[1.6_512e00, -5.2_572e00, -3.3_519e00],
[3.6_169e-02, -5.9_025e00, -2.9_313e00],
[1.0_766e-04, -7.7_630e00, -5.1_263e00],
] ).to(snake_case_ )
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , snake_case_ , atol=snake_case_ ) )
def lowercase ( self : int ):
_UpperCAmelCase = (
MaskFormerForInstanceSegmentation.from_pretrained("facebook/maskformer-resnet101-coco-stuff" )
.to(snake_case_ )
.eval()
)
_UpperCAmelCase = self.default_image_processor
_UpperCAmelCase = prepare_img()
_UpperCAmelCase = image_processor(snake_case_ , return_tensors="pt" ).to(snake_case_ )
_UpperCAmelCase = inputs["pixel_values"].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 3_2) == 0 and (inputs_shape[-2] % 3_2) == 0 )
# check size
self.assertEqual(snake_case_ , (1, 3, 8_0_0, 1_0_8_8) )
with torch.no_grad():
_UpperCAmelCase = model(**snake_case_ )
# masks_queries_logits
_UpperCAmelCase = outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape , (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , )
_UpperCAmelCase = [[-0.9_0_4_6, -2.6_3_6_6, -4.6_0_6_2], [-3.4_1_7_9, -5.7_8_9_0, -8.8_0_5_7], [-4.9_1_7_9, -7.6_5_6_0, -1_0.7_7_1_1]]
_UpperCAmelCase = torch.tensor(snake_case_ ).to(snake_case_ )
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , snake_case_ , atol=snake_case_ ) )
# class_queries_logits
_UpperCAmelCase = outputs.class_queries_logits
self.assertEqual(
class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1) )
_UpperCAmelCase = torch.tensor(
[[4.7_1_8_8, -3.2_5_8_5, -2.8_8_5_7], [6.6_8_7_1, -2.9_1_8_1, -1.2_4_8_7], [7.2_4_4_9, -2.2_7_6_4, -2.1_8_7_4]] ).to(snake_case_ )
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , snake_case_ , atol=snake_case_ ) )
def lowercase ( self : List[Any] ):
_UpperCAmelCase = (
MaskFormerForInstanceSegmentation.from_pretrained("facebook/maskformer-swin-small-coco" )
.to(snake_case_ )
.eval()
)
_UpperCAmelCase = self.default_image_processor
_UpperCAmelCase = image_processor(
[np.zeros((3, 8_0_0, 1_3_3_3) ), np.zeros((3, 8_0_0, 1_3_3_3) )] , segmentation_maps=[np.zeros((3_8_4, 3_8_4) ).astype(np.floataa ), np.zeros((3_8_4, 3_8_4) ).astype(np.floataa )] , return_tensors="pt" , )
_UpperCAmelCase = inputs["pixel_values"].to(snake_case_ )
_UpperCAmelCase = [el.to(snake_case_ ) for el in inputs["mask_labels"]]
_UpperCAmelCase = [el.to(snake_case_ ) for el in inputs["class_labels"]]
with torch.no_grad():
_UpperCAmelCase = model(**snake_case_ )
self.assertTrue(outputs.loss is not None )
| 22 | 0 |
from __future__ import annotations
def make_matrix ( row_size: int = 4 ) -> list[list[int]]:
    '''simple docstring'''
    row_size = abs(row_size ) or 4
    return [[1 + x + y * row_size for x in range(row_size )] for y in range(row_size )]
def rotate_90 ( matrix: list[list[int]] ) -> list[list[int]]:
    '''simple docstring'''
    return reverse_row(transpose(matrix ) )
    # OR.. transpose(reverse_column(matrix))
def rotate_180 ( matrix: list[list[int]] ) -> list[list[int]]:
    '''simple docstring'''
    return reverse_row(reverse_column(matrix ) )
    # OR.. reverse_column(reverse_row(matrix))
def rotate_270 ( matrix: list[list[int]] ) -> list[list[int]]:
    '''simple docstring'''
    return reverse_column(transpose(matrix ) )
    # OR.. transpose(reverse_row(matrix))
def transpose ( matrix: list[list[int]] ) -> list[list[int]]:
    '''simple docstring'''
    matrix = [list(x ) for x in zip(*matrix )]
    return matrix
def reverse_row ( matrix: list[list[int]] ) -> list[list[int]]:
    '''simple docstring'''
    matrix = matrix[::-1]
    return matrix
def reverse_column ( matrix: list[list[int]] ) -> list[list[int]]:
    '''simple docstring'''
    matrix = [x[::-1] for x in matrix]
    return matrix
def print_matrix ( matrix: list[list[int]] ) -> None:
    '''simple docstring'''
    for i in matrix:
        print(*i )
if __name__ == "__main__":
    matrix = make_matrix()
    print('''\norigin:\n''')
    print_matrix(matrix)
    print('''\nrotate 90 counterclockwise:\n''')
    print_matrix(rotate_90(matrix))
    matrix = make_matrix()
    print('''\norigin:\n''')
    print_matrix(matrix)
    print('''\nrotate 180:\n''')
    print_matrix(rotate_180(matrix))
    matrix = make_matrix()
    print('''\norigin:\n''')
    print_matrix(matrix)
    print('''\nrotate 270 counterclockwise:\n''')
    print_matrix(rotate_270(matrix))
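# Worked example (an added illustration): a 90 degree counterclockwise rotation is a
# transpose followed by a row reversal.
#
#     rotate_90([[1, 2], [3, 4]])
#     # transpose   -> [[1, 3], [2, 4]]
#     # reverse_row -> [[2, 4], [1, 3]]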
| 43 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_albert import AlbertTokenizer
else:
__SCREAMING_SNAKE_CASE :List[Any] = None
__SCREAMING_SNAKE_CASE :Union[str, Any] = logging.get_logger(__name__)
__SCREAMING_SNAKE_CASE :List[str] = {'''vocab_file''': '''spiece.model''', '''tokenizer_file''': '''tokenizer.json'''}
__SCREAMING_SNAKE_CASE :List[Any] = {
'''vocab_file''': {
'''albert-base-v1''': '''https://huggingface.co/albert-base-v1/resolve/main/spiece.model''',
'''albert-large-v1''': '''https://huggingface.co/albert-large-v1/resolve/main/spiece.model''',
'''albert-xlarge-v1''': '''https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model''',
'''albert-xxlarge-v1''': '''https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model''',
'''albert-base-v2''': '''https://huggingface.co/albert-base-v2/resolve/main/spiece.model''',
'''albert-large-v2''': '''https://huggingface.co/albert-large-v2/resolve/main/spiece.model''',
'''albert-xlarge-v2''': '''https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model''',
'''albert-xxlarge-v2''': '''https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model''',
},
'''tokenizer_file''': {
'''albert-base-v1''': '''https://huggingface.co/albert-base-v1/resolve/main/tokenizer.json''',
'''albert-large-v1''': '''https://huggingface.co/albert-large-v1/resolve/main/tokenizer.json''',
'''albert-xlarge-v1''': '''https://huggingface.co/albert-xlarge-v1/resolve/main/tokenizer.json''',
'''albert-xxlarge-v1''': '''https://huggingface.co/albert-xxlarge-v1/resolve/main/tokenizer.json''',
'''albert-base-v2''': '''https://huggingface.co/albert-base-v2/resolve/main/tokenizer.json''',
'''albert-large-v2''': '''https://huggingface.co/albert-large-v2/resolve/main/tokenizer.json''',
'''albert-xlarge-v2''': '''https://huggingface.co/albert-xlarge-v2/resolve/main/tokenizer.json''',
'''albert-xxlarge-v2''': '''https://huggingface.co/albert-xxlarge-v2/resolve/main/tokenizer.json''',
},
}
__SCREAMING_SNAKE_CASE :Optional[Any] = {
'''albert-base-v1''': 512,
'''albert-large-v1''': 512,
'''albert-xlarge-v1''': 512,
'''albert-xxlarge-v1''': 512,
'''albert-base-v2''': 512,
'''albert-large-v2''': 512,
'''albert-xlarge-v2''': 512,
'''albert-xxlarge-v2''': 512,
}
__SCREAMING_SNAKE_CASE :Optional[int] = '''▁'''
class A_ ( PreTrainedTokenizerFast ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = AlbertTokenizer
    def __init__( self , vocab_file=None , tokenizer_file=None , do_lower_case=True , remove_space=True , keep_accents=False , bos_token="[CLS]" , eos_token="[SEP]" , unk_token="<unk>" , sep_token="[SEP]" , pad_token="<pad>" , cls_token="[CLS]" , mask_token="[MASK]" , **kwargs , ):
        # Mask token behave like a normal word, i.e. include the space before it and
        # is included in the raw text, there should be a match in a non-normalized sentence.
        mask_token = (
            AddedToken(mask_token , lstrip=True , rstrip=False , normalized=False )
            if isinstance(mask_token , str )
            else mask_token
        )
        super().__init__(
            vocab_file , tokenizer_file=tokenizer_file , do_lower_case=do_lower_case , remove_space=remove_space , keep_accents=keep_accents , bos_token=bos_token , eos_token=eos_token , unk_token=unk_token , sep_token=sep_token , pad_token=pad_token , cls_token=cls_token , mask_token=mask_token , **kwargs , )
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
    def build_inputs_with_special_tokens ( self , token_ids_0 : List[int] , token_ids_1 : Optional[List[int]] = None ):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep
    def create_token_type_ids_from_sequences ( self , token_ids_0 : List[int] , token_ids_1 : Optional[List[int]] = None ):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep ) * [0] + len(token_ids_1 + sep ) * [1]
    def save_vocabulary ( self , save_directory : str , filename_prefix : Optional[str] = None ):
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer." )
        if not os.path.isdir(save_directory ):
            logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ):
            copyfile(self.vocab_file , out_vocab_file )
        return (out_vocab_file,)
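# Usage sketch (an added illustration; the token ids and the local tokenizer.json path are
# made up):
#
#     tok = A_(tokenizer_file="tokenizer.json")
#     tok.build_inputs_with_special_tokens([5, 6])           # -> [cls_id, 5, 6, sep_id]
#     tok.create_token_type_ids_from_sequences([5, 6], [7])  # -> [0, 0, 0, 0, 1, 1]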
| 22 | 0 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_a : Optional[int] = logging.get_logger(__name__)
_a : Tuple = {
'sail/poolformer_s12': 'https://huggingface.co/sail/poolformer_s12/resolve/main/config.json',
# See all PoolFormer models at https://huggingface.co/models?filter=poolformer
}
class PoolFormerConfig ( PretrainedConfig ):
    model_type = "poolformer"
    def __init__( self , num_channels=3 , patch_size=16 , stride=16 , pool_size=3 , mlp_ratio=4.0 , depths=[2, 2, 6, 2] , hidden_sizes=[64, 128, 320, 512] , patch_sizes=[7, 3, 3, 3] , strides=[4, 2, 2, 2] , padding=[2, 1, 1, 1] , num_encoder_blocks=4 , drop_path_rate=0.0 , hidden_act="gelu" , use_layer_scale=True , layer_scale_init_value=1e-5 , initializer_range=0.0_2 , **kwargs , ):
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.stride = stride
        self.padding = padding
        self.pool_size = pool_size
        self.hidden_sizes = hidden_sizes
        self.mlp_ratio = mlp_ratio
        self.depths = depths
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.num_encoder_blocks = num_encoder_blocks
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_layer_scale = use_layer_scale
        self.layer_scale_init_value = layer_scale_init_value
        self.initializer_range = initializer_range
        super().__init__(**kwargs )
class PoolFormerOnnxConfig ( OnnxConfig ):
    torch_onnx_minimum_version = version.parse("1.11" )
    @property
    def inputs ( self ):
        return OrderedDict(
            [
                ("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
            ] )
    @property
    def atol_for_validation ( self ):
        return 2e-3
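# Usage sketch (an added illustration): the ONNX export config advertises fully dynamic
# image axes plus a validation tolerance.
#
#     cfg = PoolFormerOnnxConfig(PoolFormerConfig())
#     dict(cfg.inputs)         # {'pixel_values': {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}}
#     cfg.atol_for_validation  # 0.002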
| 44 |
'''simple docstring'''
from collections import OrderedDict
from typing import Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...feature_extraction_utils import FeatureExtractionMixin
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import TensorType, logging
__SCREAMING_SNAKE_CASE :Dict = logging.get_logger(__name__)
__SCREAMING_SNAKE_CASE :int = {
'''deepmind/language-perceiver''': '''https://huggingface.co/deepmind/language-perceiver/resolve/main/config.json''',
# See all Perceiver models at https://huggingface.co/models?filter=perceiver
}
class PerceiverConfig ( PretrainedConfig ):
    model_type = """perceiver"""
    def __init__( self , num_latents=2_5_6 , d_latents=1_2_8_0 , d_model=7_6_8 , num_blocks=1 , num_self_attends_per_block=2_6 , num_self_attention_heads=8 , num_cross_attention_heads=8 , qk_channels=None , v_channels=None , cross_attention_shape_for_attention="kv" , self_attention_widening_factor=1 , cross_attention_widening_factor=1 , hidden_act="gelu" , attention_probs_dropout_prob=0.1 , initializer_range=0.0_2 , layer_norm_eps=1e-12 , use_query_residual=True , vocab_size=2_6_2 , max_position_embeddings=2_0_4_8 , image_size=5_6 , train_size=[3_6_8, 4_9_6] , num_frames=1_6 , audio_samples_per_frame=1_9_2_0 , samples_per_patch=1_6 , output_shape=[1, 1_6, 2_2_4, 2_2_4] , **kwargs , ):
        super().__init__(**kwargs )
        self.num_latents = num_latents
        self.d_latents = d_latents
        self.d_model = d_model
        self.num_blocks = num_blocks
        self.num_self_attends_per_block = num_self_attends_per_block
        self.num_self_attention_heads = num_self_attention_heads
        self.num_cross_attention_heads = num_cross_attention_heads
        self.qk_channels = qk_channels
        self.v_channels = v_channels
        self.cross_attention_shape_for_attention = cross_attention_shape_for_attention
        self.self_attention_widening_factor = self_attention_widening_factor
        self.cross_attention_widening_factor = cross_attention_widening_factor
        self.hidden_act = hidden_act
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_query_residual = use_query_residual
        # masked language modeling attributes
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        # image classification attributes
        self.image_size = image_size
        # flow attributes
        self.train_size = train_size
        # multimodal autoencoding attributes
        self.num_frames = num_frames
        self.audio_samples_per_frame = audio_samples_per_frame
        self.samples_per_patch = samples_per_patch
        self.output_shape = output_shape
class PerceiverOnnxConfig ( OnnxConfig ):
@property
    def inputs ( self ):
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("inputs", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ] )
@property
    def atol_for_validation ( self ):
return 1e-4
    def generate_dummy_inputs ( self , preprocessor : Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"] , batch_size : int = -1 , seq_length : int = -1 , num_choices : int = -1 , is_pair : bool = False , framework : Optional[TensorType] = None , num_channels : int = 3 , image_width : int = 4_0 , image_height : int = 4_0 , ):
        # copied from `transformers.onnx.config.OnnxConfig` and slightly altered/simplified
        if isinstance(preprocessor , PreTrainedTokenizerBase ):
            # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
            batch_size = compute_effective_axis_dimension(
                batch_size , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
            # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
            token_to_add = preprocessor.num_special_tokens_to_add(is_pair )
            seq_length = compute_effective_axis_dimension(
                seq_length , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=token_to_add )
            # Generate dummy inputs according to compute batch and sequence
            dummy_input = [" ".join(["a"] ) * seq_length] * batch_size
            inputs = dict(preprocessor(dummy_input , return_tensors=framework ) )
            inputs["inputs"] = inputs.pop("input_ids" )
            return inputs
        elif isinstance(preprocessor , FeatureExtractionMixin ) and preprocessor.model_input_names[0] == "pixel_values":
            # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
            batch_size = compute_effective_axis_dimension(batch_size , fixed_dimension=OnnxConfig.default_fixed_batch )
            dummy_input = self._generate_dummy_images(batch_size , num_channels , image_height , image_width )
            inputs = dict(preprocessor(images=dummy_input , return_tensors=framework ) )
            inputs["inputs"] = inputs.pop("pixel_values" )
            return inputs
        else:
            raise ValueError(
                "Unable to generate dummy inputs for the model. Please provide a tokenizer or a preprocessor." )
| 22 | 0 |
"""simple docstring"""
def lowercase ( lowerCAmelCase__ : int ) -> bool:
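    """Return True when the input integer is even.

    Added docstring (illustrative) so the ``doctest.testmod()`` call below has cases to check:

    >>> lowercase(2)
    True
    >>> lowercase(-4)
    True
    >>> lowercase(3)
    False
    """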
return number & 1 == 0
if __name__ == "__main__":
import doctest
doctest.testmod()
| 45 |
'''simple docstring'''
import warnings
from ...utils import is_sklearn_available, requires_backends
if is_sklearn_available():
from scipy.stats import pearsonr, spearmanr
    from sklearn.metrics import f1_score, matthews_corrcoef
DEPRECATION_WARNING = (
'''This metric will be removed from the library soon, metrics should be handled with the 🤗 Evaluate '''
'''library. You can have a look at this example script for pointers: '''
'''https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py'''
)
def simple_accuracy ( preds , labels ):
    '''simple docstring'''
    warnings.warn(DEPRECATION_WARNING , FutureWarning )
    requires_backends(simple_accuracy , "sklearn" )
    return (preds == labels).mean()
def acc_and_f1 ( preds , labels ):
    '''simple docstring'''
    warnings.warn(DEPRECATION_WARNING , FutureWarning )
    requires_backends(acc_and_f1 , "sklearn" )
    acc = simple_accuracy(preds , labels )
    f1 = f1_score(y_true=labels , y_pred=preds )
    return {
        "acc": acc,
        "f1": f1,
        "acc_and_f1": (acc + f1) / 2,
    }
def pearson_and_spearman ( preds , labels ):
    '''simple docstring'''
    warnings.warn(DEPRECATION_WARNING , FutureWarning )
    requires_backends(pearson_and_spearman , "sklearn" )
    pearson_corr = pearsonr(preds , labels )[0]
    spearman_corr = spearmanr(preds , labels )[0]
    return {
        "pearson": pearson_corr,
        "spearmanr": spearman_corr,
        "corr": (pearson_corr + spearman_corr) / 2,
    }
def glue_compute_metrics ( task_name , preds , labels ):
    '''simple docstring'''
    warnings.warn(DEPRECATION_WARNING , FutureWarning )
    requires_backends(glue_compute_metrics , "sklearn" )
    assert len(preds ) == len(labels ), f'Predictions and labels have mismatched lengths {len(preds )} and {len(labels )}'
    if task_name == "cola":
        return {"mcc": matthews_corrcoef(labels , preds )}
    elif task_name == "sst-2":
        return {"acc": simple_accuracy(preds , labels )}
    elif task_name == "mrpc":
        return acc_and_f1(preds , labels )
    elif task_name == "sts-b":
        return pearson_and_spearman(preds , labels )
    elif task_name == "qqp":
        return acc_and_f1(preds , labels )
    elif task_name == "mnli":
        return {"mnli/acc": simple_accuracy(preds , labels )}
    elif task_name == "mnli-mm":
        return {"mnli-mm/acc": simple_accuracy(preds , labels )}
    elif task_name == "qnli":
        return {"acc": simple_accuracy(preds , labels )}
    elif task_name == "rte":
        return {"acc": simple_accuracy(preds , labels )}
    elif task_name == "wnli":
        return {"acc": simple_accuracy(preds , labels )}
    elif task_name == "hans":
        return {"acc": simple_accuracy(preds , labels )}
    else:
        raise KeyError(task_name )
def xnli_compute_metrics ( task_name , preds , labels ):
    '''simple docstring'''
    warnings.warn(DEPRECATION_WARNING , FutureWarning )
    requires_backends(xnli_compute_metrics , "sklearn" )
    if len(preds ) != len(labels ):
        raise ValueError(f'Predictions and labels have mismatched lengths {len(preds )} and {len(labels )}' )
    if task_name == "xnli":
        return {"acc": simple_accuracy(preds , labels )}
    else:
        raise KeyError(task_name )
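# Usage sketch (an added illustration; requires scikit-learn and scipy):
#
#     import numpy as np
#     preds = np.array([1, 0, 1, 1]); labels = np.array([1, 0, 0, 1])
#     glue_compute_metrics("mrpc", preds, labels)
#     # -> {'acc': 0.75, 'f1': 0.8, 'acc_and_f1': 0.775}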
| 22 | 0 |
"""simple docstring"""
import argparse
import torch
from transformers import GPT2LMHeadModel, RobertaForMaskedLM
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ = argparse.ArgumentParser(
description=(
"Extraction some layers of the full RobertaForMaskedLM or GPT2LMHeadModel for Transfer Learned"
" Distillation"
)
)
parser.add_argument("--model_type", default="roberta", choices=["roberta", "gpt2"])
parser.add_argument("--model_name", default="roberta-large", type=str)
parser.add_argument("--dump_checkpoint", default="serialization_dir/tf_roberta_048131723.pth", type=str)
parser.add_argument("--vocab_transform", action="store_true")
SCREAMING_SNAKE_CASE__ = parser.parse_args()
if args.model_type == "roberta":
SCREAMING_SNAKE_CASE__ = RobertaForMaskedLM.from_pretrained(args.model_name)
SCREAMING_SNAKE_CASE__ = "roberta"
elif args.model_type == "gpt2":
SCREAMING_SNAKE_CASE__ = GPTaLMHeadModel.from_pretrained(args.model_name)
SCREAMING_SNAKE_CASE__ = "transformer"
SCREAMING_SNAKE_CASE__ = model.state_dict()
SCREAMING_SNAKE_CASE__ = {}
# Embeddings #
if args.model_type == "gpt2":
for param_name in ["wte.weight", "wpe.weight"]:
SCREAMING_SNAKE_CASE__ = state_dict[f'{prefix}.{param_name}']
else:
for w in ["word_embeddings", "position_embeddings", "token_type_embeddings"]:
SCREAMING_SNAKE_CASE__ = f'{prefix}.embeddings.{w}.weight'
SCREAMING_SNAKE_CASE__ = state_dict[param_name]
for w in ["weight", "bias"]:
SCREAMING_SNAKE_CASE__ = f'{prefix}.embeddings.LayerNorm.{w}'
SCREAMING_SNAKE_CASE__ = state_dict[param_name]
# Transformer Blocks #
SCREAMING_SNAKE_CASE__ = 0
for teacher_idx in [0, 2, 4, 7, 9, 11]:
if args.model_type == "gpt2":
for layer in ["ln_1", "attn.c_attn", "attn.c_proj", "ln_2", "mlp.c_fc", "mlp.c_proj"]:
for w in ["weight", "bias"]:
SCREAMING_SNAKE_CASE__ = state_dict[
f'{prefix}.h.{teacher_idx}.{layer}.{w}'
]
SCREAMING_SNAKE_CASE__ = state_dict[f'{prefix}.h.{teacher_idx}.attn.bias']
else:
for layer in [
"attention.self.query",
"attention.self.key",
"attention.self.value",
"attention.output.dense",
"attention.output.LayerNorm",
"intermediate.dense",
"output.dense",
"output.LayerNorm",
]:
for w in ["weight", "bias"]:
SCREAMING_SNAKE_CASE__ = state_dict[
f'{prefix}.encoder.layer.{teacher_idx}.{layer}.{w}'
]
std_idx += 1
# Language Modeling Head ###s
if args.model_type == "roberta":
for layer in ["lm_head.decoder.weight", "lm_head.bias"]:
SCREAMING_SNAKE_CASE__ = state_dict[f'{layer}']
if args.vocab_transform:
for w in ["weight", "bias"]:
SCREAMING_SNAKE_CASE__ = state_dict[f'lm_head.dense.{w}']
SCREAMING_SNAKE_CASE__ = state_dict[f'lm_head.layer_norm.{w}']
elif args.model_type == "gpt2":
for w in ["weight", "bias"]:
SCREAMING_SNAKE_CASE__ = state_dict[f'{prefix}.ln_f.{w}']
SCREAMING_SNAKE_CASE__ = state_dict["lm_head.weight"]
print(f'N layers selected for distillation: {std_idx}')
print(f'Number of params transferred for distillation: {len(compressed_sd.keys())}')
print(f'Save transferred checkpoint to {args.dump_checkpoint}.')
torch.save(compressed_sd, args.dump_checkpoint)
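# Usage sketch (an added illustration; the script file name and output path are hypothetical):
# extract a 6-layer student initialized from teacher layers [0, 2, 4, 7, 9, 11].
#
#     python extract.py --model_type roberta --model_name roberta-base \
#         --dump_checkpoint serialization_dir/roberta_6L.pth --vocab_transform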
| 46 |
'''simple docstring'''
import argparse
from transformers import (
TapasConfig,
TapasForMaskedLM,
TapasForQuestionAnswering,
TapasForSequenceClassification,
TapasModel,
TapasTokenizer,
load_tf_weights_in_tapas,
)
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch ( task , reset_position_index_per_cell , tf_checkpoint_path , tapas_config_file , pytorch_dump_path ):
    '''simple docstring'''
    config = TapasConfig.from_json_file(tapas_config_file )
    # set absolute/relative position embeddings parameter
    config.reset_position_index_per_cell = reset_position_index_per_cell
    # set remaining parameters of TapasConfig as well as the model based on the task
    if task == "SQA":
        model = TapasForQuestionAnswering(config=config )
    elif task == "WTQ":
        # run_task_main.py hparams
        config.num_aggregation_labels = 4
        config.use_answer_as_supervision = True
        # hparam_utils.py hparams
        config.answer_loss_cutoff = 0.66_4694
        config.cell_selection_preference = 0.20_7951
        config.huber_loss_delta = 0.12_1194
        config.init_cell_selection_weights_to_zero = True
        config.select_one_column = True
        config.allow_empty_column_selection = False
        config.temperature = 0.035_2513
        model = TapasForQuestionAnswering(config=config )
    elif task == "WIKISQL_SUPERVISED":
        # run_task_main.py hparams
        config.num_aggregation_labels = 4
        config.use_answer_as_supervision = False
        # hparam_utils.py hparams
        config.answer_loss_cutoff = 36.4519
        config.cell_selection_preference = 0.90_3421
        config.huber_loss_delta = 222.088
        config.init_cell_selection_weights_to_zero = True
        config.select_one_column = True
        config.allow_empty_column_selection = True
        config.temperature = 0.76_3141
        model = TapasForQuestionAnswering(config=config )
    elif task == "TABFACT":
        model = TapasForSequenceClassification(config=config )
    elif task == "MLM":
        model = TapasForMaskedLM(config=config )
    elif task == "INTERMEDIATE_PRETRAINING":
        model = TapasModel(config=config )
    else:
        raise ValueError(f'Task {task} not supported.' )
    print(f'Building PyTorch model from configuration: {config}' )
    # Load weights from tf checkpoint
    load_tf_weights_in_tapas(model , config , tf_checkpoint_path )
    # Save pytorch-model (weights and configuration)
    print(f'Save PyTorch model to {pytorch_dump_path}' )
    model.save_pretrained(pytorch_dump_path )
    # Save tokenizer files
    print(f'Save tokenizer files to {pytorch_dump_path}' )
    tokenizer = TapasTokenizer(vocab_file=tf_checkpoint_path[:-10] + "vocab.txt" , model_max_length=512 )
    tokenizer.save_pretrained(pytorch_dump_path )
    print("Used relative position embeddings:" , model.config.reset_position_index_per_cell )
if __name__ == "__main__":
__SCREAMING_SNAKE_CASE :List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--task''', default='''SQA''', type=str, help='''Model task for which to convert a checkpoint. Defaults to SQA.'''
)
parser.add_argument(
'''--reset_position_index_per_cell''',
default=False,
action='''store_true''',
help='''Whether to use relative position embeddings or not. Defaults to True.''',
)
parser.add_argument(
'''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--tapas_config_file''',
default=None,
type=str,
required=True,
help=(
'''The config json file corresponding to the pre-trained TAPAS model. \n'''
'''This specifies the model architecture.'''
),
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
__SCREAMING_SNAKE_CASE :List[str] = parser.parse_args()
convert_tf_checkpoint_to_pytorch(
args.task,
args.reset_position_index_per_cell,
args.tf_checkpoint_path,
args.tapas_config_file,
args.pytorch_dump_path,
)
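# Usage sketch (an added illustration; every path below is hypothetical). Note the script
# derives the vocab location from the checkpoint path, so a vocab.txt is expected next to
# model.ckpt.
#
#     python convert_tapas_checkpoint.py --task WTQ --reset_position_index_per_cell \
#         --tf_checkpoint_path ./tapas_wtq/model.ckpt --tapas_config_file ./tapas_wtq/config.json \
#         --pytorch_dump_path ./tapas_wtq_pytorch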
| 22 | 0 |
'''simple docstring'''
from sklearn.metrics import recall_score
import datasets
_DESCRIPTION = "\nRecall is the fraction of the positive examples that were correctly labeled by the model as positive. It can be computed with the equation:\nRecall = TP / (TP + FN)\nWhere TP is the true positives and FN is the false negatives.\n"
_KWARGS_DESCRIPTION = "\nArgs:\n- **predictions** (`list` of `int`): The predicted labels.\n- **references** (`list` of `int`): The ground truth labels.\n- **labels** (`list` of `int`): The set of labels to include when `average` is not set to `binary`, and their order when average is `None`. Labels present in the data can be excluded in this input, for example to calculate a multiclass average ignoring a majority negative class, while labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in y_true and y_pred are used in sorted order. Defaults to None.\n- **pos_label** (`int`): The class label to use as the 'positive class' when calculating the recall. Defaults to `1`.\n- **average** (`string`): This parameter is required for multiclass/multilabel targets. If None, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `'binary'`.\n - `'binary'`: Only report results for the class specified by `pos_label`. This is applicable only if the target labels and predictions are binary.\n - `'micro'`: Calculate metrics globally by counting the total true positives, false negatives, and false positives.\n - `'macro'`: Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.\n - `'weighted'`: Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `'macro'` to account for label imbalance. Note that it can result in an F-score that is not between precision and recall.\n - `'samples'`: Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).\n- **sample_weight** (`list` of `float`): Sample weights. Defaults to `None`.\n- **zero_division** (`'warn'`, `0` or `1`): Sets the value to return when there is a zero division. Defaults to `'warn'`.\n - `'warn'`: If there is a zero division, the return value is `0`, but warnings are also raised.\n - `0`: If there is a zero division, the return value is `0`.\n - `1`: If there is a zero division, the return value is `1`.\n\nReturns:\n- **recall** (`float`, or `array` of `float`): Either the general recall score, or the recall scores for individual classes, depending on the values input to `labels` and `average`. Minimum possible value is 0. Maximum possible value is 1. A higher recall means that more of the positive examples have been labeled correctly. Therefore, a higher recall is generally considered better.\n\nExamples:\n\n Example 1-A simple example with some errors\n >>> recall_metric = datasets.load_metric('recall')\n >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1])\n >>> print(results)\n {'recall': 0.6666666666666666}\n\n Example 2-The same example as Example 1, but with `pos_label=0` instead of the default `pos_label=1`.\n >>> recall_metric = datasets.load_metric('recall')\n >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], pos_label=0)\n >>> print(results)\n {'recall': 0.5}\n\n Example 3-The same example as Example 1, but with `sample_weight` included.\n >>> recall_metric = datasets.load_metric('recall')\n >>> sample_weight = [0.9, 0.2, 0.9, 0.3, 0.8]\n >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], sample_weight=sample_weight)\n >>> print(results)\n {'recall': 0.55}\n\n Example 4-A multiclass example, using different averages.\n >>> recall_metric = datasets.load_metric('recall')\n >>> predictions = [0, 2, 1, 0, 0, 1]\n >>> references = [0, 1, 2, 0, 1, 2]\n >>> results = recall_metric.compute(predictions=predictions, references=references, average='macro')\n >>> print(results)\n {'recall': 0.3333333333333333}\n >>> results = recall_metric.compute(predictions=predictions, references=references, average='micro')\n >>> print(results)\n {'recall': 0.3333333333333333}\n >>> results = recall_metric.compute(predictions=predictions, references=references, average='weighted')\n >>> print(results)\n {'recall': 0.3333333333333333}\n >>> results = recall_metric.compute(predictions=predictions, references=references, average=None)\n >>> print(results)\n {'recall': array([1., 0., 0.])}\n"
_CITATION = "\n@article{scikit-learn, title={Scikit-learn: Machine Learning in {P}ython}, author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V. and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P. and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.}, journal={Journal of Machine Learning Research}, volume={12}, pages={2825--2830}, year={2011}\n}\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class A__ ( datasets.Metric ):
def A ( self : Dict ) -> Union[str, Any]:
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Sequence(datasets.Value('int32' ) ),
'references': datasets.Sequence(datasets.Value('int32' ) ),
}
if self.config_name == 'multilabel'
else {
'predictions': datasets.Value('int32' ),
'references': datasets.Value('int32' ),
} ) , reference_urls=['https://scikit-learn.org/stable/modules/generated/sklearn.metrics.recall_score.html'] , )
def A ( self : Optional[int] , _a : int , _a : Optional[Any] , _a : Union[str, Any]=None , _a : Union[str, Any]=1 , _a : List[str]="binary" , _a : Union[str, Any]=None , _a : Dict="warn" , ) -> Optional[int]:
'''simple docstring'''
_SCREAMING_SNAKE_CASE =recall_score(
_a , _a , labels=_a , pos_label=_a , average=_a , sample_weight=_a , zero_division=_a , )
return {"recall": float(_a ) if score.size == 1 else score}
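# Quick sanity check of the formula Recall = TP / (TP + FN) described above,
# independent of sklearn (a minimal sketch; the variable names are illustrative):
#
#   references = [0, 0, 1, 1, 1]
#   predictions = [0, 1, 0, 1, 1]
#   tp = sum(r == 1 and p == 1 for r, p in zip(references, predictions))  # 2
#   fn = sum(r == 1 and p == 0 for r, p in zip(references, predictions))  # 1
#   tp / (tp + fn)  # 0.666..., matching Example 1 in the docstring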
| 47 |
'''simple docstring'''
import os
from datetime import datetime as dt
from github import Github
LABELS_TO_EXEMPT = [
'''good first issue''',
'''feature request''',
'''wip''',
]
def UpperCAmelCase_ ( ) -> Optional[Any]:
'''simple docstring'''
    g = Github(os.environ["GITHUB_TOKEN"] )
    repo = g.get_repo("huggingface/accelerate" )
    open_issues = repo.get_issues(state="open" )
    for issue in open_issues:
        comments = sorted([comment for comment in issue.get_comments()] , key=lambda i : i.created_at , reverse=True )
        last_comment = comments[0] if len(comments ) > 0 else None
        current_time = dt.utcnow()
        days_since_updated = (current_time - issue.updated_at).days
        days_since_creation = (current_time - issue.created_at).days
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and days_since_updated > 7
and days_since_creation >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Close issue since it has been 7 days of inactivity since bot mention.
issue.edit(state="closed" )
elif (
days_since_updated > 23
and days_since_creation >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Add stale comment
issue.create_comment(
"This issue has been automatically marked as stale because it has not had "
"recent activity. If you think this still needs to be addressed "
"please comment on this thread.\n\nPlease note that issues that do not follow the "
"[contributing guidelines](https://github.com/huggingface/accelerate/blob/main/CONTRIBUTING.md) "
"are likely to be ignored." )
if __name__ == "__main__":
main()
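# Timeline sketch of the rules above: an issue is closed only when it is at least
# 30 days old, its last comment came from the bot, and nothing has happened for
# more than 7 days since that comment; otherwise, once it has been quiet for more
# than 23 days (and is at least 30 days old) it receives the stale warning first.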
| 22 | 0 |
def greatest_common_divisor(x: int , y: int ) -> int:
    return x if y == 0 else greatest_common_divisor(y , x % y )
def lcm(x: int , y: int ) -> int:
    return (x * y) // greatest_common_divisor(x , y )
def solution(n: int = 20 ) -> int:
    g = 1
    for i in range(1 , n + 1 ):
        g = lcm(g , i )
    return g
if __name__ == "__main__":
print(f'''{solution() = }''')
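# Spot checks (a small sketch): lcm(1..10) is the classic warm-up value 2520, and
# gcd(x, y) * lcm(x, y) == x * y must hold for any pair.
#
#   assert solution(10) == 2520
#   assert greatest_common_divisor(12, 18) * lcm(12, 18) == 12 * 18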
| 48 |
'''simple docstring'''
import os
import pytest
import yaml
from datasets.features.features import Features, Value
from datasets.info import DatasetInfo, DatasetInfosDict
@pytest.mark.parametrize(
"files" , [
["full:README.md", "dataset_infos.json"],
["empty:README.md", "dataset_infos.json"],
["dataset_infos.json"],
["full:README.md"],
] , )
def UpperCAmelCase_ ( files , tmp_path_factory ):
'''simple docstring'''
    dataset_infos_dir = tmp_path_factory.mktemp("dset_infos_dir" )
if "full:README.md" in files:
with open(dataset_infos_dir / "README.md" , "w" ) as f:
f.write("---\ndataset_info:\n dataset_size: 42\n---" )
if "empty:README.md" in files:
with open(dataset_infos_dir / "README.md" , "w" ) as f:
f.write("" )
# we want to support dataset_infos.json for backward compatibility
if "dataset_infos.json" in files:
with open(dataset_infos_dir / "dataset_infos.json" , "w" ) as f:
f.write("{\"default\": {\"dataset_size\": 42}}" )
    dataset_infos = DatasetInfosDict.from_directory(dataset_infos_dir )
assert dataset_infos
assert dataset_infos["default"].dataset_size == 42
@pytest.mark.parametrize(
"dataset_info" , [
DatasetInfo(),
DatasetInfo(
description="foo" , features=Features({"a": Value("int32" )} ) , builder_name="builder" , config_name="config" , version="1.0.0" , splits=[{"name": "train"}] , download_size=42 , ),
] , )
def UpperCAmelCase_ ( tmp_path , dataset_info : DatasetInfo ):
    '''simple docstring'''
    tmp_path = str(tmp_path )
    dataset_info.write_to_directory(tmp_path )
    reloaded = DatasetInfo.from_directory(tmp_path )
    assert dataset_info == reloaded
    assert os.path.exists(os.path.join(tmp_path , "dataset_info.json" ) )
def UpperCAmelCase_ ( ) -> Optional[Any]:
'''simple docstring'''
    dataset_info = DatasetInfo(
description="foo" , citation="bar" , homepage="https://foo.bar" , license="CC0" , features=Features({"a": Value("int32" )} ) , post_processed={} , supervised_keys=() , task_templates=[] , builder_name="builder" , config_name="config" , version="1.0.0" , splits=[{"name": "train", "num_examples": 42}] , download_checksums={} , download_size=1337 , post_processing_size=442 , dataset_size=1234 , size_in_bytes=1337 + 442 + 1234 , )
    dataset_info_yaml_dict = dataset_info._to_yaml_dict()
    assert sorted(dataset_info_yaml_dict ) == sorted(DatasetInfo._INCLUDED_INFO_IN_YAML )
for key in DatasetInfo._INCLUDED_INFO_IN_YAML:
assert key in dataset_info_yaml_dict
assert isinstance(dataset_info_yaml_dict[key] , (list, dict, int, str) )
    dataset_info_yaml = yaml.safe_dump(dataset_info_yaml_dict )
    reloaded = yaml.safe_load(dataset_info_yaml )
assert dataset_info_yaml_dict == reloaded
def UpperCAmelCase_ ( ) -> Optional[int]:
'''simple docstring'''
    dataset_info = DatasetInfo()
    dataset_info_yaml_dict = dataset_info._to_yaml_dict()
assert dataset_info_yaml_dict == {}
@pytest.mark.parametrize(
"dataset_infos_dict" , [
DatasetInfosDict(),
DatasetInfosDict({"default": DatasetInfo()} ),
DatasetInfosDict({"my_config_name": DatasetInfo()} ),
DatasetInfosDict(
{
"default": DatasetInfo(
description="foo" , features=Features({"a": Value("int32" )} ) , builder_name="builder" , config_name="config" , version="1.0.0" , splits=[{"name": "train"}] , download_size=42 , )
} ),
DatasetInfosDict(
{
"v1": DatasetInfo(dataset_size=42 ),
"v2": DatasetInfo(dataset_size=1337 ),
} ),
] , )
def UpperCAmelCase_ ( tmp_path , dataset_infos_dict : DatasetInfosDict ):
    '''simple docstring'''
    tmp_path = str(tmp_path )
    dataset_infos_dict.write_to_directory(tmp_path )
    reloaded = DatasetInfosDict.from_directory(tmp_path )
    # the config_name of the dataset_infos_dict take over the attribute
    for config_name, dataset_info in dataset_infos_dict.items():
        dataset_info.config_name = config_name
        # the yaml representation doesn't include fields like description or citation
        # so we just test that we can recover what we can from the yaml
        dataset_infos_dict[config_name] = DatasetInfo._from_yaml_dict(dataset_info._to_yaml_dict() )
assert dataset_infos_dict == reloaded
if dataset_infos_dict:
assert os.path.exists(os.path.join(__lowercase , "README.md" ) )
| 22 | 0 |
import math
import random
from typing import Any
from .hill_climbing import SearchProblem
def simulated_annealing( search_prob , find_max = True , max_x = math.inf , min_x = -math.inf , max_y = math.inf , min_y = -math.inf , visualization = False , start_temperate = 100 , rate_of_decrease = 0.01 , threshold_temp = 1 , ):
    search_end = False
    current_state = search_prob
    current_temp = start_temperate
    scores = []
    iterations = 0
    best_state = None
    while not search_end:
        current_score = current_state.score()
        if best_state is None or current_score > best_state.score():
            best_state = current_state
        scores.append(current_score )
        iterations += 1
        next_state = None
        neighbors = current_state.get_neighbors()
        while (
            next_state is None and neighbors
        ):  # till we do not find a neighbor that we can move to
            index = random.randint(0 , len(neighbors ) - 1 )  # picking a random neighbor
            picked_neighbor = neighbors.pop(index )
            change = picked_neighbor.score() - current_score
            if (
                picked_neighbor.x > max_x
                or picked_neighbor.x < min_x
                or picked_neighbor.y > max_y
                or picked_neighbor.y < min_y
            ):
                continue  # neighbor outside our bounds
            if not find_max:
                change = change * -1  # in case we are finding minimum
            if change > 0:  # improves the solution
                next_state = picked_neighbor
            else:
                probability = (math.e) ** (
                    change / current_temp
                )  # probability generation function
                if random.random() < probability:  # random number within probability
                    next_state = picked_neighbor
        current_temp = current_temp - (current_temp * rate_of_decrease)
        if current_temp < threshold_temp or next_state is None:
            # temperature below threshold, or could not find a suitable neighbor
            search_end = True
        else:
            current_state = next_state
    if visualization:
        from matplotlib import pyplot as plt
        plt.plot(range(iterations ) , scores )
        plt.xlabel('''Iterations''' )
        plt.ylabel('''Function values''' )
        plt.show()
    return best_state
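# Note on the acceptance rule above (the Metropolis criterion): a worsening move
# with score change `change` < 0 is still accepted with probability e^(change / T).
# With change = -1 this is e^(-0.01) ~= 0.990 at T = 100 but only e^(-1) ~= 0.368
# at T = 1, which is why the search explores early and settles as the temperature
# decays.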
if __name__ == "__main__":
    def test_fa(x , y ):
        return (x**2) + (y**2)
# starting the problem with initial coordinates (12, 47)
    prob = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_fa)
    local_min = simulated_annealing(
prob, find_max=False, max_x=100, min_x=5, max_y=50, min_y=-5, visualization=True
)
print(
'''The minimum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 '''
f'and 50 > y > - 5 found via hill climbing: {local_min.score()}'
)
# starting the problem with initial coordinates (12, 47)
    prob = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_fa)
    local_min = simulated_annealing(
prob, find_max=True, max_x=100, min_x=5, max_y=50, min_y=-5, visualization=True
)
print(
'''The maximum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 '''
f'and 50 > y > - 5 found via hill climbing: {local_min.score()}'
)
    def test_fa(x , y ):
        return (3 * x**2) - (6 * y)
    prob = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_fa)
    local_min = simulated_annealing(prob, find_max=False, visualization=True)
print(
'''The minimum score for f(x, y) = 3*x^2 - 6*y found via hill climbing: '''
f'{local_min.score()}'
)
    prob = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_fa)
    local_min = simulated_annealing(prob, find_max=True, visualization=True)
print(
'''The maximum score for f(x, y) = 3*x^2 - 6*y found via hill climbing: '''
f'{local_min.score()}'
)
| 49 |
'''simple docstring'''
def reverse_long_words(sentence: str ) -> str:
    '''simple docstring'''
    return " ".join(
        word[::-1] if len(word ) > 4 else word for word in sentence.split() )
if __name__ == "__main__":
import doctest
doctest.testmod()
print(reverse_long_words('''Hey wollef sroirraw'''))
| 22 | 0 |
import argparse
from pathlib import Path
import fairseq
import torch
from fairseq.models.xmod import XMODModel as FairseqXmodModel
from packaging import version
from transformers import XmodConfig, XmodForMaskedLM, XmodForSequenceClassification
from transformers.utils import logging
if version.parse(fairseq.__version__) < version.parse("""0.12.2"""):
raise Exception("""requires fairseq >= 0.12.2""")
if version.parse(fairseq.__version__) > version.parse("""2"""):
raise Exception("""requires fairseq < v2""")
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
SAMPLE_TEXT = """Hello, World!"""
SAMPLE_LANGUAGE = """en_XX"""
def convert_xmod_checkpoint_to_pytorch(xmod_checkpoint_path , pytorch_dump_folder_path , classification_head ):
    data_dir = Path('data_bin' )
    xmod = FairseqXmodModel.from_pretrained(
        model_name_or_path=str(Path(xmod_checkpoint_path ).parent ) , checkpoint_file=Path(xmod_checkpoint_path ).name , _name='xmod_base' , arch='xmod_base' , task='multilingual_masked_lm' , data_name_or_path=str(data_dir ) , bpe='sentencepiece' , sentencepiece_model=str(Path(xmod_checkpoint_path ).parent / 'sentencepiece.bpe.model' ) , src_dict=str(data_dir / 'dict.txt' ) , )
    xmod.eval()  # disable dropout
    print(xmod )
    xmod_sent_encoder = xmod.model.encoder.sentence_encoder
    config = XmodConfig(
        vocab_size=xmod_sent_encoder.embed_tokens.num_embeddings , hidden_size=xmod.cfg.model.encoder_embed_dim , num_hidden_layers=xmod.cfg.model.encoder_layers , num_attention_heads=xmod.cfg.model.encoder_attention_heads , intermediate_size=xmod.cfg.model.encoder_ffn_embed_dim , max_position_embeddings=514 , type_vocab_size=1 , layer_norm_eps=1e-5 , pre_norm=xmod.cfg.model.encoder_normalize_before , adapter_reduction_factor=getattr(xmod.cfg.model , 'bottleneck' , 2 ) , adapter_layer_norm=xmod.cfg.model.adapter_layer_norm , adapter_reuse_layer_norm=xmod.cfg.model.adapter_reuse_layer_norm , ln_before_adapter=xmod.cfg.model.ln_before_adapter , languages=xmod.cfg.model.languages , )
    if classification_head:
        config.num_labels = xmod.model.classification_heads['mnli'].out_proj.weight.shape[0]
    print('Our X-MOD config:' , config )
    model = XmodForSequenceClassification(config ) if classification_head else XmodForMaskedLM(config )
    model.eval()
# Now let's copy all the weights.
# Embeddings
lowerCamelCase__ : Any = xmod_sent_encoder.embed_tokens.weight
lowerCamelCase__ : str = xmod_sent_encoder.embed_positions.weight
lowerCamelCase__ : Optional[int] = torch.zeros_like(
model.roberta.embeddings.token_type_embeddings.weight ) # just zero them out b/c xmod doesn't use them.
lowerCamelCase__ : Any = xmod_sent_encoder.layernorm_embedding.weight
lowerCamelCase__ : str = xmod_sent_encoder.layernorm_embedding.bias
for i in range(config.num_hidden_layers ):
# Encoder: start of layer
        layer = model.roberta.encoder.layer[i]
        xmod_layer = xmod_sent_encoder.layers[i]
# self attention
lowerCamelCase__ : Union[str, Any] = layer.attention.self
if not (
xmod_layer.self_attn.k_proj.weight.data.shape
== xmod_layer.self_attn.q_proj.weight.data.shape
== xmod_layer.self_attn.v_proj.weight.data.shape
== torch.Size((config.hidden_size, config.hidden_size) )
):
raise AssertionError('Dimensions of self-attention weights do not match.' )
lowerCamelCase__ : Optional[int] = xmod_layer.self_attn.q_proj.weight
lowerCamelCase__ : Any = xmod_layer.self_attn.q_proj.bias
lowerCamelCase__ : Tuple = xmod_layer.self_attn.k_proj.weight
lowerCamelCase__ : Tuple = xmod_layer.self_attn.k_proj.bias
lowerCamelCase__ : int = xmod_layer.self_attn.v_proj.weight
lowerCamelCase__ : Any = xmod_layer.self_attn.v_proj.bias
# self-attention output
        self_output = layer.attention.output
if self_output.dense.weight.shape != xmod_layer.self_attn.out_proj.weight.shape:
raise AssertionError('Dimensions of self-attention output weights do not match.' )
lowerCamelCase__ : Optional[Any] = xmod_layer.self_attn.out_proj.weight
lowerCamelCase__ : Tuple = xmod_layer.self_attn.out_proj.bias
lowerCamelCase__ : Dict = xmod_layer.self_attn_layer_norm.weight
lowerCamelCase__ : Tuple = xmod_layer.self_attn_layer_norm.bias
# intermediate
        intermediate = layer.intermediate
if intermediate.dense.weight.shape != xmod_layer.fca.weight.shape:
raise AssertionError('Dimensions of intermediate weights do not match.' )
lowerCamelCase__ : Tuple = xmod_layer.fca.weight
lowerCamelCase__ : List[str] = xmod_layer.fca.bias
# output
        bert_output = layer.output
if bert_output.dense.weight.shape != xmod_layer.fca.weight.shape:
raise AssertionError('Dimensions of feed-forward weights do not match.' )
lowerCamelCase__ : Union[str, Any] = xmod_layer.fca.weight
lowerCamelCase__ : int = xmod_layer.fca.bias
lowerCamelCase__ : List[Any] = xmod_layer.final_layer_norm.weight
lowerCamelCase__ : Union[str, Any] = xmod_layer.final_layer_norm.bias
if bert_output.adapter_layer_norm is not None:
lowerCamelCase__ : Dict = xmod_layer.adapter_layer_norm.weight
lowerCamelCase__ : Tuple = xmod_layer.adapter_layer_norm.bias
if sorted(bert_output.adapter_modules.keys() ) != sorted(xmod_layer.adapter_modules.keys() ):
raise AssertionError('Lists of language adapters do not match.' )
for lang_code, adapter in xmod_layer.adapter_modules.items():
            to_adapter = bert_output.adapter_modules[lang_code]
            from_adapter = xmod_layer.adapter_modules[lang_code]
lowerCamelCase__ : Dict = from_adapter.fca.weight
lowerCamelCase__ : Any = from_adapter.fca.bias
lowerCamelCase__ : Optional[Any] = from_adapter.fca.weight
lowerCamelCase__ : List[str] = from_adapter.fca.bias
# end of layer
if xmod_sent_encoder.layer_norm is not None:
lowerCamelCase__ : List[Any] = xmod_sent_encoder.layer_norm.weight
lowerCamelCase__ : List[str] = xmod_sent_encoder.layer_norm.bias
if classification_head:
lowerCamelCase__ : Union[str, Any] = xmod.model.classification_heads['mnli'].dense.weight
lowerCamelCase__ : Any = xmod.model.classification_heads['mnli'].dense.bias
lowerCamelCase__ : int = xmod.model.classification_heads['mnli'].out_proj.weight
lowerCamelCase__ : Union[str, Any] = xmod.model.classification_heads['mnli'].out_proj.bias
else:
# LM Head
lowerCamelCase__ : Optional[int] = xmod.model.encoder.lm_head.dense.weight
lowerCamelCase__ : Any = xmod.model.encoder.lm_head.dense.bias
lowerCamelCase__ : Dict = xmod.model.encoder.lm_head.layer_norm.weight
lowerCamelCase__ : str = xmod.model.encoder.lm_head.layer_norm.bias
lowerCamelCase__ : str = xmod.model.encoder.lm_head.weight
lowerCamelCase__ : Optional[Any] = xmod.model.encoder.lm_head.bias
    # Let's check that we get the same results.
    input_ids = xmod.encode(SAMPLE_TEXT ).unsqueeze(0 )  # batch of size 1
    model.roberta.set_default_language(SAMPLE_LANGUAGE )
    our_output = model(input_ids )[0]
    if classification_head:
        their_output = xmod.model.classification_heads['mnli'](xmod.extract_features(input_ids ) )
    else:
        their_output = xmod.model(input_ids , lang_id=[SAMPLE_LANGUAGE] )[0]
    print(our_output.shape , their_output.shape )
    max_absolute_diff = torch.max(torch.abs(our_output - their_output ) ).item()
    print(F"""max_absolute_diff = {max_absolute_diff}""" )  # ~ 1e-7
    success = torch.allclose(our_output , their_output , atol=1e-3 )
    print('Do both models output the same tensors?' , '🔥' if success else '💩' )
    if not success:
        raise Exception('Something went wRoNg' )
    Path(pytorch_dump_folder_path ).mkdir(parents=True , exist_ok=True )
    print(F"""Saving model to {pytorch_dump_folder_path}""" )
    model.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--xmod_checkpoint_path""", default=None, type=str, required=True, help="""Path the official PyTorch dump."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--classification_head""", action="""store_true""", help="""Whether to convert a final classification head."""
)
    args = parser.parse_args()
convert_xmod_checkpoint_to_pytorch(
args.xmod_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head
)
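# Example invocation (a sketch, assuming the script is saved as convert_xmod.py
# and that the fairseq checkpoint sits next to its sentencepiece.bpe.model; the
# paths are placeholders, not real assets):
#
#   python convert_xmod.py \
#       --xmod_checkpoint_path /path/to/xmod.base/model.pt \
#       --pytorch_dump_folder_path /path/to/hf_xmod_base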
| 50 |
'''simple docstring'''
def harmonic_series(n_term: str ) -> list:
    '''simple docstring'''
    if n_term == "":
        return []
    series = []
    for temp in range(int(n_term ) ):
series.append(f'1/{temp + 1}' if series else "1" )
return series
if __name__ == "__main__":
    nth_term = input('''Enter the last number (nth term) of the Harmonic Series''')
print('''Formula of Harmonic Series => 1+1/2+1/3 ..... 1/n''')
print(harmonic_series(nth_term))
| 22 | 0 |
def base16_encode(data: bytes ) -> str:
    """simple docstring"""
    return "".join([hex(byte )[2:].zfill(2 ).upper() for byte in list(data )] )
def base16_decode(data: str ) -> bytes:
    """simple docstring"""
    if (len(data ) % 2) != 0:
        raise ValueError(
            '''Base16 encoded data is invalid: Data does not have an even number of hex digits.''' )
    # Check the character set - the standard base16 alphabet
    # is uppercase according to RFC3548 section 6
    if not set(data ) <= set('''0123456789ABCDEF''' ):
        raise ValueError(
            '''Base16 encoded data is invalid: Data is not uppercase hex or it contains invalid characters.''' )
    # For every two hexadecimal digits (= a byte), turn it into an integer.
    # Then, string the result together into bytes, and return it.
    return bytes(int(data[i] + data[i + 1] , 16 ) for i in range(0 , len(data ) , 2 ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
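# Round-trip sanity check for the two functions above (a minimal sketch):
#
#   encoded = base16_encode(b"Hello World!")
#   assert encoded == "48656C6C6F20576F726C6421"
#   assert base16_decode(encoded) == b"Hello World!"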
| 51 |
'''simple docstring'''
import unittest
from transformers import PegasusTokenizer, PegasusTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir('''fixtures/test_sentencepiece_no_bos.model''')
@require_sentencepiece
@require_tokenizers
class PegasusTokenizationTest ( TokenizerTesterMixin , unittest.TestCase ):
_lowerCamelCase : List[str] = PegasusTokenizer
_lowerCamelCase : int = PegasusTokenizerFast
_lowerCamelCase : Union[str, Any] = True
_lowerCamelCase : List[str] = True
def lowercase ( self : Optional[int] ):
super().setUp()
# We have a SentencePiece fixture for testing
        tokenizer = PegasusTokenizer(SAMPLE_VOCAB )
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def lowercase ( self : Tuple ):
return PegasusTokenizer.from_pretrained("google/pegasus-large" )
def lowercase ( self : Union[str, Any] , **snake_case_ : Union[str, Any] ):
return PegasusTokenizer.from_pretrained(self.tmpdirname , **snake_case_ )
def lowercase ( self : Tuple , snake_case_ : Any ):
return ("This is a test", "This is a test")
def lowercase ( self : Optional[int] ):
_UpperCAmelCase = "</s>"
_UpperCAmelCase = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(snake_case_ ) , snake_case_ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(snake_case_ ) , snake_case_ )
def lowercase ( self : Union[str, Any] ):
_UpperCAmelCase = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , "<pad>" )
self.assertEqual(vocab_keys[1] , "</s>" )
self.assertEqual(vocab_keys[-1] , "v" )
self.assertEqual(len(snake_case_ ) , 1_1_0_3 )
def lowercase ( self : Any ):
self.assertEqual(self.get_tokenizer().vocab_size , 1_1_0_3 )
def lowercase ( self : List[Any] ):
        rust_tokenizer = self.rust_tokenizer_class.from_pretrained(self.tmpdirname )
        py_tokenizer = self.tokenizer_class.from_pretrained(self.tmpdirname )
        raw_input_str = (
            "Let's see which <unk> is the better <unk_token_11> one <mask_1> It seems like this <mask_2> was important"
            " </s> <pad> <pad> <pad>"
        )
        rust_ids = rust_tokenizer([raw_input_str] , return_tensors=None , add_special_tokens=False ).input_ids[0]
        py_ids = py_tokenizer([raw_input_str] , return_tensors=None , add_special_tokens=False ).input_ids[0]
        self.assertListEqual(py_ids , rust_ids )
def lowercase ( self : Tuple ):
        tokenizer = self._large_tokenizer
        # <mask_1> masks whole sentence while <mask_2> masks single word
        raw_input_str = "<mask_1> To ensure a <mask_2> flow of bank resolutions."
        desired_result = [2, 4_1_3, 6_1_5, 1_1_4, 3, 1_9_7_1, 1_1_3, 1_6_7_9, 1_0_7_1_0, 1_0_7, 1]
        ids = tokenizer([raw_input_str] , return_tensors=None ).input_ids[0]
        self.assertListEqual(desired_result , ids )
def lowercase ( self : Union[str, Any] ):
        tokenizer = self._large_tokenizer
# The tracebacks for the following asserts are **better** without messages or self.assertEqual
assert tokenizer.vocab_size == 9_6_1_0_3
assert tokenizer.pad_token_id == 0
assert tokenizer.eos_token_id == 1
assert tokenizer.offset == 1_0_3
assert tokenizer.unk_token_id == tokenizer.offset + 2 == 1_0_5
assert tokenizer.unk_token == "<unk>"
assert tokenizer.model_max_length == 1_0_2_4
_UpperCAmelCase = "To ensure a smooth flow of bank resolutions."
_UpperCAmelCase = [4_1_3, 6_1_5, 1_1_4, 2_2_9_1, 1_9_7_1, 1_1_3, 1_6_7_9, 1_0_7_1_0, 1_0_7, 1]
_UpperCAmelCase = tokenizer([raw_input_str] , return_tensors=snake_case_ ).input_ids[0]
self.assertListEqual(snake_case_ , snake_case_ )
assert tokenizer.convert_ids_to_tokens([0, 1, 2, 3] ) == ["<pad>", "</s>", "<mask_1>", "<mask_2>"]
@require_torch
def lowercase ( self : int ):
_UpperCAmelCase = ["This is going to be way too long." * 1_5_0, "short example"]
_UpperCAmelCase = ["not super long but more than 5 tokens", "tiny"]
_UpperCAmelCase = self._large_tokenizer(snake_case_ , padding=snake_case_ , truncation=snake_case_ , return_tensors="pt" )
_UpperCAmelCase = self._large_tokenizer(
text_target=snake_case_ , max_length=5 , padding=snake_case_ , truncation=snake_case_ , return_tensors="pt" )
assert batch.input_ids.shape == (2, 1_0_2_4)
assert batch.attention_mask.shape == (2, 1_0_2_4)
assert targets["input_ids"].shape == (2, 5)
assert len(snake_case_ ) == 2 # input_ids, attention_mask.
@slow
def lowercase ( self : Dict ):
# fmt: off
_UpperCAmelCase = {"input_ids": [[3_8_9_7_9, 1_4_3, 1_8_4_8_5, 6_0_6, 1_3_0, 2_6_6_6_9, 8_7_6_8_6, 1_2_1, 5_4_1_8_9, 1_1_2_9, 1_1_1, 2_6_6_6_9, 8_7_6_8_6, 1_2_1, 9_1_1_4, 1_4_7_8_7, 1_2_1, 1_3_2_4_9, 1_5_8, 5_9_2, 9_5_6, 1_2_1, 1_4_6_2_1, 3_1_5_7_6, 1_4_3, 6_2_6_1_3, 1_0_8, 9_6_8_8, 9_3_0, 4_3_4_3_0, 1_1_5_6_2, 6_2_6_1_3, 3_0_4, 1_0_8, 1_1_4_4_3, 8_9_7, 1_0_8, 9_3_1_4, 1_7_4_1_5, 6_3_3_9_9, 1_0_8, 1_1_4_4_3, 7_6_1_4, 1_8_3_1_6, 1_1_8, 4_2_8_4, 7_1_4_8, 1_2_4_3_0, 1_4_3, 1_4_0_0, 2_5_7_0_3, 1_5_8, 1_1_1, 4_2_8_4, 7_1_4_8, 1_1_7_7_2, 1_4_3, 2_1_2_9_7, 1_0_6_4, 1_5_8, 1_2_2, 2_0_4, 3_5_0_6, 1_7_5_4, 1_1_3_3, 1_4_7_8_7, 1_5_8_1, 1_1_5, 3_3_2_2_4, 4_4_8_2, 1_1_1, 1_3_5_5, 1_1_0, 2_9_1_7_3, 3_1_7, 5_0_8_3_3, 1_0_8, 2_0_1_4_7, 9_4_6_6_5, 1_1_1, 7_7_1_9_8, 1_0_7, 1], [1_1_0, 6_2_6_1_3, 1_1_7, 6_3_8, 1_1_2, 1_1_3_3, 1_2_1, 2_0_0_9_8, 1_3_5_5, 7_9_0_5_0, 1_3_8_7_2, 1_3_5, 1_5_9_6, 5_3_5_4_1, 1_3_5_2, 1_4_1, 1_3_0_3_9, 5_5_4_2, 1_2_4, 3_0_2, 5_1_8, 1_1_1, 2_6_8, 2_9_5_6, 1_1_5, 1_4_9, 4_4_2_7, 1_0_7, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1_3_9, 1_2_3_5, 2_7_9_9, 1_8_2_8_9, 1_7_7_8_0, 2_0_4, 1_0_9, 9_4_7_4, 1_2_9_6, 1_0_7, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=snake_case_ , model_name="google/bigbird-pegasus-large-arxiv" , revision="ba85d0851d708441f91440d509690f1ab6353415" , )
@require_sentencepiece
@require_tokenizers
class BigBirdPegasusTokenizationTest ( TokenizerTesterMixin , unittest.TestCase ):
_lowerCamelCase : List[str] = PegasusTokenizer
_lowerCamelCase : List[Any] = PegasusTokenizerFast
_lowerCamelCase : int = True
_lowerCamelCase : Union[str, Any] = True
def lowercase ( self : Any ):
super().setUp()
# We have a SentencePiece fixture for testing
        tokenizer = PegasusTokenizer(SAMPLE_VOCAB , offset=0 , mask_token_sent=None , mask_token="[MASK]" )
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def lowercase ( self : Tuple ):
return PegasusTokenizer.from_pretrained("google/bigbird-pegasus-large-arxiv" )
def lowercase ( self : Optional[Any] , **snake_case_ : Dict ):
return PegasusTokenizer.from_pretrained(self.tmpdirname , **snake_case_ )
def lowercase ( self : Union[str, Any] , snake_case_ : str ):
return ("This is a test", "This is a test")
def lowercase ( self : List[str] ):
        rust_tokenizer = self.rust_tokenizer_class.from_pretrained(self.tmpdirname )
        py_tokenizer = self.tokenizer_class.from_pretrained(self.tmpdirname )
        raw_input_str = (
            "Let's see which <unk> is the better <unk_token> one [MASK] It seems like this [MASK] was important </s>"
            " <pad> <pad> <pad>"
        )
        rust_ids = rust_tokenizer([raw_input_str] , return_tensors=None , add_special_tokens=False ).input_ids[0]
        py_ids = py_tokenizer([raw_input_str] , return_tensors=None , add_special_tokens=False ).input_ids[0]
        self.assertListEqual(py_ids , rust_ids )
@require_torch
def lowercase ( self : Tuple ):
_UpperCAmelCase = ["This is going to be way too long." * 1_0_0_0, "short example"]
_UpperCAmelCase = ["not super long but more than 5 tokens", "tiny"]
_UpperCAmelCase = self._large_tokenizer(snake_case_ , padding=snake_case_ , truncation=snake_case_ , return_tensors="pt" )
_UpperCAmelCase = self._large_tokenizer(
text_target=snake_case_ , max_length=5 , padding=snake_case_ , truncation=snake_case_ , return_tensors="pt" )
assert batch.input_ids.shape == (2, 4_0_9_6)
assert batch.attention_mask.shape == (2, 4_0_9_6)
assert targets["input_ids"].shape == (2, 5)
assert len(snake_case_ ) == 2 # input_ids, attention_mask.
def lowercase ( self : Union[str, Any] ):
        src_text = (
            "This is an example string that is used to test the original TF implementation against the HF"
            " implementation"
        )
        ids = self._large_tokenizer(src_text ).input_ids
        self.assertListEqual(
            ids , [1_8_2, 1_1_7, 1_4_2, 5_8_7, 4_2_1_1, 1_2_0, 1_1_7, 2_6_3, 1_1_2, 8_0_4, 1_0_9, 8_5_6, 2_5_0_1_6, 3_1_3_7, 4_6_4, 1_0_9, 2_6_9_5_5, 3_1_3_7, 1] , )
| 22 | 0 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)
ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""roberta-base""": """https://huggingface.co/roberta-base/resolve/main/config.json""",
"""roberta-large""": """https://huggingface.co/roberta-large/resolve/main/config.json""",
"""roberta-large-mnli""": """https://huggingface.co/roberta-large-mnli/resolve/main/config.json""",
"""distilroberta-base""": """https://huggingface.co/distilroberta-base/resolve/main/config.json""",
"""roberta-base-openai-detector""": """https://huggingface.co/roberta-base-openai-detector/resolve/main/config.json""",
"""roberta-large-openai-detector""": """https://huggingface.co/roberta-large-openai-detector/resolve/main/config.json""",
}
class RobertaConfig( PretrainedConfig ):
    model_type = 'roberta'
    def __init__( self , vocab_size=5_0265 , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3072 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1e-12 , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , position_embedding_type="absolute" , use_cache=True , classifier_dropout=None , **kwargs , ):
        '''simple docstring'''
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class RobertaOnnxConfig( OnnxConfig ):
@property
def __UpperCamelCase( self ):
'''simple docstring'''
if self.task == "multiple-choice":
UpperCamelCase : Optional[int] = {0: "batch", 1: "choice", 2: "sequence"}
else:
UpperCamelCase : Optional[int] = {0: "batch", 1: "sequence"}
return OrderedDict(
[
("input_ids", dynamic_axis),
("attention_mask", dynamic_axis),
] )
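# Usage sketch for the config class above (defaults mirror the roberta-base
# checkpoint listed in the archive map; the smaller variant is hypothetical):
#
#   config = RobertaConfig()
#   small = RobertaConfig(num_hidden_layers=6)
#   assert small.vocab_size == 50265 and small.num_hidden_layers == 6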
| 52 |
'''simple docstring'''
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, BertTokenizer, BlipImageProcessor, BlipProcessor, PreTrainedTokenizerFast
@require_vision
class A_ ( unittest.TestCase ):
def lowercase ( self : int ):
        self.tmpdirname = tempfile.mkdtemp()
        image_processor = BlipImageProcessor()
        tokenizer = BertTokenizer.from_pretrained("hf-internal-testing/tiny-random-BertModel" )
        processor = BlipProcessor(image_processor , tokenizer )
        processor.save_pretrained(self.tmpdirname )
def lowercase ( self : Tuple , **snake_case_ : int ):
return AutoProcessor.from_pretrained(self.tmpdirname , **snake_case_ ).tokenizer
def lowercase ( self : Dict , **snake_case_ : Any ):
return AutoProcessor.from_pretrained(self.tmpdirname , **snake_case_ ).image_processor
def lowercase ( self : int ):
shutil.rmtree(self.tmpdirname )
def lowercase ( self : Optional[Any] ):
        image_inputs = [np.random.randint(2_5_5 , size=(3, 3_0, 4_0_0) , dtype=np.uint8 )]
        image_inputs = [Image.fromarray(np.moveaxis(x , 0 , -1 ) ) for x in image_inputs]
        return image_inputs
    def lowercase ( self : int ):
        processor = BlipProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
        processor.save_pretrained(self.tmpdirname )
        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)" , eos_token="(EOS)" )
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False , padding_value=1.0 )
        processor = BlipProcessor.from_pretrained(
            self.tmpdirname , bos_token="(BOS)" , eos_token="(EOS)" , do_normalize=False , padding_value=1.0 )
        self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
        self.assertIsInstance(processor.tokenizer , PreTrainedTokenizerFast )
        self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
        self.assertIsInstance(processor.image_processor , BlipImageProcessor )
    def lowercase ( self : Any ):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = BlipProcessor(tokenizer=tokenizer , image_processor=image_processor )
        image_input = self.prepare_image_inputs()
        input_feat_extract = image_processor(image_input , return_tensors="np" )
        input_processor = processor(images=image_input , return_tensors="np" )
        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
    def lowercase ( self : Optional[int] ):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = BlipProcessor(tokenizer=tokenizer , image_processor=image_processor )
        input_str = "lower newer"
        encoded_processor = processor(text=input_str )
        encoded_tok = tokenizer(input_str , return_token_type_ids=False )
        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
    def lowercase ( self : Optional[Any] ):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = BlipProcessor(tokenizer=tokenizer , image_processor=image_processor )
        input_str = "lower newer"
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str , images=image_input )
        self.assertListEqual(list(inputs.keys() ) , ["pixel_values", "input_ids", "attention_mask"] )
        # test if it raises when no input is passed
        with pytest.raises(ValueError ):
            processor()
    def lowercase ( self : Union[str, Any] ):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = BlipProcessor(tokenizer=tokenizer , image_processor=image_processor )
        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
        decoded_processor = processor.batch_decode(predicted_ids )
        decoded_tok = tokenizer.batch_decode(predicted_ids )
        self.assertListEqual(decoded_tok , decoded_processor )
    def lowercase ( self : str ):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = BlipProcessor(tokenizer=tokenizer , image_processor=image_processor )
        input_str = "lower newer"
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str , images=image_input )
        # For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask']
        self.assertListEqual(list(inputs.keys() ) , ["pixel_values", "input_ids", "attention_mask"] )
| 22 | 0 |
'''simple docstring'''
import math
def proth(number: int ) -> int:
    """simple docstring"""
    if not isinstance(number , int ):
        msg = F'''Input value of [number={number}] must be an integer'''
        raise TypeError(msg )
    if number < 1:
        msg = F'''Input value of [number={number}] must be > 0'''
        raise ValueError(msg )
    elif number == 1:
        return 3
    elif number == 2:
        return 5
    else:
        block_index = int(math.log(number // 3 , 2 ) ) + 2
        proth_list = [3, 5]
        proth_index = 2
        increment = 3
        for block in range(1 , block_index ):
            for _ in range(increment ):
                proth_list.append(2 ** (block + 1) + proth_list[proth_index - 1] )
                proth_index += 1
            increment *= 2
return proth_list[number - 1]
if __name__ == "__main__":
import doctest
doctest.testmod()
for number in range(11):
        value = 0
        try:
            value = proth(number)
except ValueError:
print(f'ValueError: there is no {number}th Proth number')
continue
print(f'The {number}th Proth number: {value}')
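# For reference, the loop above reproduces the start of the Proth sequence
# (k * 2**n + 1 with odd k < 2**n): proth(1)..proth(6) -> 3, 5, 9, 13, 17, 25.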
| 53 |
'''simple docstring'''
import inspect
from typing import List, Optional, Tuple, Union
import numpy as np
import PIL
import torch
import torch.utils.checkpoint
from ...models import UNet2DModel, VQModel
from ...schedulers import (
DDIMScheduler,
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
)
from ...utils import PIL_INTERPOLATION, randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
def preprocess(image ):
    '''simple docstring'''
    w , h = image.size
    w , h = (x - x % 32 for x in (w, h))  # resize to integer multiple of 32
    image = image.resize((w, h) , resample=PIL_INTERPOLATION["lanczos"] )
    image = np.array(image ).astype(np.float32 ) / 255.0
    image = image[None].transpose(0 , 3 , 1 , 2 )
    image = torch.from_numpy(image )
return 2.0 * image - 1.0
class A_ ( DiffusionPipeline ):
    def __init__( self : Optional[Any] , vqvae : VQModel , unet : UNet2DModel , scheduler : Union[
        DDIMScheduler,
        PNDMScheduler,
        LMSDiscreteScheduler,
        EulerDiscreteScheduler,
        EulerAncestralDiscreteScheduler,
        DPMSolverMultistepScheduler,
    ] , ):
        super().__init__()
        self.register_modules(vqvae=vqvae , unet=unet , scheduler=scheduler )
@torch.no_grad()
    def __call__( self : Any , image : Union[torch.Tensor, PIL.Image.Image] = None , batch_size : Optional[int] = 1 , num_inference_steps : Optional[int] = 1_0_0 , eta : Optional[float] = 0.0 , generator : Optional[Union[torch.Generator, List[torch.Generator]]] = None , output_type : Optional[str] = "pil" , return_dict : bool = True , ):
        if isinstance(image , PIL.Image.Image ):
            batch_size = 1
        elif isinstance(image , torch.Tensor ):
            batch_size = image.shape[0]
        else:
            raise ValueError(f'`image` has to be of type `PIL.Image.Image` or `torch.Tensor` but is {type(image )}' )
        if isinstance(image , PIL.Image.Image ):
            image = preprocess(image )
        height , width = image.shape[-2:]
        # in_channels should be 6: 3 for latents, 3 for low resolution image
        latents_shape = (batch_size, self.unet.config.in_channels // 2, height, width)
        latents_dtype = next(self.unet.parameters() ).dtype
        latents = randn_tensor(latents_shape , generator=generator , device=self.device , dtype=latents_dtype )
        image = image.to(device=self.device , dtype=latents_dtype )
        # set timesteps and move to the correct device
        self.scheduler.set_timesteps(num_inference_steps , device=self.device )
        timesteps_tensor = self.scheduler.timesteps
        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma
        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature.
        # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
        # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
        # and should be between [0, 1]
        accepts_eta = "eta" in set(inspect.signature(self.scheduler.step ).parameters.keys() )
        extra_kwargs = {}
        if accepts_eta:
            extra_kwargs["eta"] = eta
        for t in self.progress_bar(timesteps_tensor ):
            # concat latents and low resolution image in the channel dimension.
            latents_input = torch.cat([latents, image] , dim=1 )
            latents_input = self.scheduler.scale_model_input(latents_input , t )
            # predict the noise residual
            noise_pred = self.unet(latents_input , t ).sample
            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred , t , latents , **extra_kwargs ).prev_sample
        # decode the image latents with the VQVAE
        image = self.vqvae.decode(latents ).sample
        image = torch.clamp(image , -1.0 , 1.0 )
        image = image / 2 + 0.5
        image = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image )
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=image )
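# Usage sketch (hypothetical wiring: assumes a checkpoint that ships compatible
# `vqvae`, `unet` and scheduler weights, e.g. an LDM super-resolution repo; the
# repo id and file names below are illustrative, not guaranteed):
#
#   from PIL import Image
#   pipe = A_.from_pretrained("CompVis/ldm-super-resolution-4x-openimages")
#   low_res = Image.open("input_128.png").convert("RGB")
#   upscaled = pipe(low_res, num_inference_steps=100, eta=1.0).images[0]
#   upscaled.save("output.png")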
| 22 | 0 |
"""simple docstring"""
from typing import List, Optional, Union
import torch
from ...models import UNet2DConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
EXAMPLE_DOC_STRING = '''
Examples:
```py
>>> import torch
>>> import numpy as np
>>> from diffusers import KandinskyV22PriorPipeline, KandinskyV22ControlnetPipeline
>>> from transformers import pipeline
>>> from diffusers.utils import load_image
>>> def make_hint(image, depth_estimator):
... image = depth_estimator(image)["depth"]
... image = np.array(image)
... image = image[:, :, None]
... image = np.concatenate([image, image, image], axis=2)
... detected_map = torch.from_numpy(image).float() / 255.0
... hint = detected_map.permute(2, 0, 1)
... return hint
>>> depth_estimator = pipeline("depth-estimation")
>>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(
... "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16
... )
>>> pipe_prior = pipe_prior.to("cuda")
>>> pipe = KandinskyV22ControlnetPipeline.from_pretrained(
... "kandinsky-community/kandinsky-2-2-controlnet-depth", torch_dtype=torch.float16
... )
>>> pipe = pipe.to("cuda")
>>> img = load_image(
... "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
... "/kandinsky/cat.png"
... ).resize((768, 768))
>>> hint = make_hint(img, depth_estimator).unsqueeze(0).half().to("cuda")
>>> prompt = "A robot, 4k photo"
>>> negative_prior_prompt = "lowres, text, error, cropped, worst quality, low quality, jpeg artifacts, ugly, duplicate, morbid, mutilated, out of frame, extra fingers, mutated hands, poorly drawn hands, poorly drawn face, mutation, deformed, blurry, dehydrated, bad anatomy, bad proportions, extra limbs, cloned face, disfigured, gross proportions, malformed limbs, missing arms, missing legs, extra arms, extra legs, fused fingers, too many fingers, long neck, username, watermark, signature"
>>> generator = torch.Generator(device="cuda").manual_seed(43)
>>> image_emb, zero_image_emb = pipe_prior(
... prompt=prompt, negative_prompt=negative_prior_prompt, generator=generator
... ).to_tuple()
>>> images = pipe(
... image_embeds=image_emb,
... negative_image_embeds=zero_image_emb,
... hint=hint,
... num_inference_steps=50,
... generator=generator,
... height=768,
... width=768,
... ).images
>>> images[0].save("robot_cat.png")
```
'''
def downscale_height_and_width(height , width , scale_factor=8 ):
    '''simple docstring'''
    new_height = height // scale_factor**2
    if height % scale_factor**2 != 0:
        new_height += 1
    new_width = width // scale_factor**2
    if width % scale_factor**2 != 0:
        new_width += 1
    return new_height * scale_factor, new_width * scale_factor
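# e.g. downscale_height_and_width(768, 768) -> (96, 96): with the default
# scale_factor of 8 the result is height // 64 (rounded up) times 8, i.e. the
# latent grid that 768 x 768 pixels map onto before decoding.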
class UpperCamelCase_ ( DiffusionPipeline ):
    """simple docstring"""
    def __init__( self : Tuple , unet : UNet2DConditionModel , scheduler : DDPMScheduler , movq : VQModel , ) -> Tuple:
        super().__init__()
        self.register_modules(
            unet=unet , scheduler=scheduler , movq=movq , )
        self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels ) - 1)
    def prepare_latents( self , shape , dtype , device , generator , latents , scheduler ):
        if latents is None:
            latents = randn_tensor(shape , generator=generator , device=device , dtype=dtype )
        else:
            if latents.shape != shape:
                raise ValueError(F"""Unexpected latents shape, got {latents.shape}, expected {shape}""" )
            latents = latents.to(device )
        latents = latents * scheduler.init_noise_sigma
        return latents
    def enable_sequential_cpu_offload( self , gpu_id=0 ):
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("Please install accelerate via `pip install accelerate`" )
        device = torch.device(F"""cuda:{gpu_id}""" )
        models = [
            self.unet,
            self.movq,
        ]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model , device )
    def enable_model_cpu_offload( self , gpu_id=0 ):
        if is_accelerate_available() and is_accelerate_version(">=" , "0.17.0.dev0" ):
            from accelerate import cpu_offload_with_hook
        else:
            raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher." )
        device = torch.device(F"""cuda:{gpu_id}""" )
        if self.device.type != "cpu":
            self.to("cpu" , silence_dtype_warnings=True )
            torch.cuda.empty_cache()  # otherwise we don't see the memory savings (but they probably exist)
        hook = None
        for cpu_offloaded_model in [self.unet, self.movq]:
            _ , hook = cpu_offload_with_hook(cpu_offloaded_model , device , prev_module_hook=hook )
        # We'll offload the last model manually.
        self.final_offload_hook = hook
@property
    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    @property
    def _execution_device(self):
        # Return the device the UNet actually runs on, honouring `accelerate` offload hooks.
        if not hasattr(self.unet, "_hf_hook"):
            return self.device
        for module in self.unet.modules():
            if (
                hasattr(module, "_hf_hook")
                and hasattr(module._hf_hook, "execution_device")
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device)
        return self.device

    @torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING)
    def __call__(
        self,
        image_embeds: Union[torch.FloatTensor, List[torch.FloatTensor]],
        negative_image_embeds: Union[torch.FloatTensor, List[torch.FloatTensor]],
        hint: torch.FloatTensor,
        height: int = 512,
        width: int = 512,
        num_inference_steps: int = 100,
        guidance_scale: float = 4.0,
        num_images_per_prompt: int = 1,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        latents: Optional[torch.FloatTensor] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ):
        device = self._execution_device
        do_classifier_free_guidance = guidance_scale > 1.0

        if isinstance(image_embeds, list):
            image_embeds = torch.cat(image_embeds, dim=0)
        if isinstance(negative_image_embeds, list):
            negative_image_embeds = torch.cat(negative_image_embeds, dim=0)
        if isinstance(hint, list):
            hint = torch.cat(hint, dim=0)

        batch_size = image_embeds.shape[0] * num_images_per_prompt

        if do_classifier_free_guidance:
            image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
            negative_image_embeds = negative_image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
            hint = hint.repeat_interleave(num_images_per_prompt, dim=0)

            image_embeds = torch.cat([negative_image_embeds, image_embeds], dim=0).to(
                dtype=self.unet.dtype, device=device
            )
            hint = torch.cat([hint, hint], dim=0).to(dtype=self.unet.dtype, device=device)

        self.scheduler.set_timesteps(num_inference_steps, device=device)
        timesteps = self.scheduler.timesteps

        num_channels_latents = self.movq.config.latent_channels
        height, width = downscale_height_and_width(height, width, self.movq_scale_factor)

        # create initial latents
        latents = self.prepare_latents(
            (batch_size, num_channels_latents, height, width),
            image_embeds.dtype,
            device,
            generator,
            latents,
            self.scheduler,
        )

        for i, t in enumerate(self.progress_bar(timesteps)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents

            added_cond_kwargs = {"image_embeds": image_embeds, "hint": hint}
            noise_pred = self.unet(
                sample=latent_model_input,
                timestep=t,
                encoder_hidden_states=None,
                added_cond_kwargs=added_cond_kwargs,
                return_dict=False,
            )[0]

            if do_classifier_free_guidance:
                noise_pred, variance_pred = noise_pred.split(latents.shape[1], dim=1)
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                _, variance_pred_text = variance_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
                noise_pred = torch.cat([noise_pred, variance_pred_text], dim=1)

            if not (
                hasattr(self.scheduler.config, "variance_type")
                and self.scheduler.config.variance_type in ["learned", "learned_range"]
            ):
                noise_pred, _ = noise_pred.split(latents.shape[1], dim=1)

            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred, t, latents, generator=generator)[0]

        # post-processing
        image = self.movq.decode(latents, force_not_quantize=True)["sample"]

        if output_type not in ["pt", "np", "pil"]:
            raise ValueError(f"Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}")

        if output_type in ["np", "pil"]:
            image = image * 0.5 + 0.5
            image = image.clamp(0, 1)
            image = image.cpu().permute(0, 2, 3, 1).float().numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
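# Self-contained sketch (added for illustration, not part of the pipeline) of the
# classifier-free guidance recombination used inside the denoising loop above; the
# tensor shape in the demo is an invented assumption.
def example_classifier_free_guidance(noise_pred, guidance_scale):
    # dim 0 stacks the unconditional half and the conditional half of the batch
    noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
    return noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)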
| 54 |
'''simple docstring'''
import string
from math import log10


def term_frequency(term: str, document: str) -> int:
    '''simple docstring'''
    document_without_punctuation = document.translate(
        str.maketrans("", "", string.punctuation)
    ).replace("\n", "")
    tokenize_document = document_without_punctuation.split(" ")  # word tokenization
    return len([word for word in tokenize_document if word.lower() == term.lower()])


def document_frequency(term: str, corpus: str) -> tuple[int, int]:
    '''simple docstring'''
    corpus_without_punctuation = corpus.lower().translate(
        str.maketrans("", "", string.punctuation)
    )  # strip all punctuation and replace it with ''
    docs = corpus_without_punctuation.split("\n")
    term = term.lower()
    return (len([doc for doc in docs if term in doc]), len(docs))


def inverse_document_frequency(df: int, n: int, smoothing: bool = False) -> float:
    '''simple docstring'''
    if smoothing:
        if n == 0:
            raise ValueError("log10(0) is undefined.")
        return round(1 + log10(n / (1 + df)), 3)
    if df == 0:
        raise ZeroDivisionError("df must be > 0")
    elif n == 0:
        raise ValueError("log10(0) is undefined.")
    return round(log10(n / df), 3)


def tf_idf(tf: int, idf: float) -> float:
    '''simple docstring'''
    return round(tf * idf, 3)
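# Hedged usage sketch (added): a two-document toy corpus, invented purely for
# illustration of the helpers above.
if __name__ == "__main__":
    corpus = "this is the first document\nthis is the second document"
    tf = term_frequency("first", "this is the first document")  # 1
    df, n = document_frequency("first", corpus)  # (1, 2)
    print(tf_idf(tf, inverse_document_frequency(df, n)))  # 0.301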
| 22 | 0 |
'''simple docstring'''
import requests
from bs4 import BeautifulSoup


def stock_price(symbol: str = "AAPL") -> str:
    # Scrape the quoted price for `symbol` from Yahoo Finance (fragile: depends on page markup).
    url = f"https://in.finance.yahoo.com/quote/{symbol}?s={symbol}"
    soup = BeautifulSoup(requests.get(url).text, "html.parser")
    class_ = "My(6px) Pos(r) smartphone_Mt(6px)"
    return soup.find("div", class_=class_).find("span").text


if __name__ == "__main__":
    for symbol in "AAPL AMZN IBM GOOG MSFT ORCL".split():
        print(f"Current {symbol:<4} stock price is {stock_price(symbol):>8}")
| 55 |
'''simple docstring'''
from ..utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_scipy_available,
is_torch_available,
is_torchsde_available,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_pt_objects import * # noqa F403
else:
from .scheduling_consistency_models import CMStochasticIterativeScheduler
from .scheduling_ddim import DDIMScheduler
from .scheduling_ddim_inverse import DDIMInverseScheduler
from .scheduling_ddim_parallel import DDIMParallelScheduler
from .scheduling_ddpm import DDPMScheduler
from .scheduling_ddpm_parallel import DDPMParallelScheduler
from .scheduling_deis_multistep import DEISMultistepScheduler
from .scheduling_dpmsolver_multistep import DPMSolverMultistepScheduler
from .scheduling_dpmsolver_multistep_inverse import DPMSolverMultistepInverseScheduler
from .scheduling_dpmsolver_singlestep import DPMSolverSinglestepScheduler
from .scheduling_euler_ancestral_discrete import EulerAncestralDiscreteScheduler
from .scheduling_euler_discrete import EulerDiscreteScheduler
from .scheduling_heun_discrete import HeunDiscreteScheduler
from .scheduling_ipndm import IPNDMScheduler
from .scheduling_k_dpm_2_ancestral_discrete import KDPMaAncestralDiscreteScheduler
from .scheduling_k_dpm_2_discrete import KDPMaDiscreteScheduler
from .scheduling_karras_ve import KarrasVeScheduler
from .scheduling_pndm import PNDMScheduler
from .scheduling_repaint import RePaintScheduler
from .scheduling_sde_ve import ScoreSdeVeScheduler
from .scheduling_sde_vp import ScoreSdeVpScheduler
from .scheduling_unclip import UnCLIPScheduler
from .scheduling_unipc_multistep import UniPCMultistepScheduler
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin
from .scheduling_vq_diffusion import VQDiffusionScheduler
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_flax_objects import * # noqa F403
else:
from .scheduling_ddim_flax import FlaxDDIMScheduler
from .scheduling_ddpm_flax import FlaxDDPMScheduler
from .scheduling_dpmsolver_multistep_flax import FlaxDPMSolverMultistepScheduler
from .scheduling_karras_ve_flax import FlaxKarrasVeScheduler
from .scheduling_lms_discrete_flax import FlaxLMSDiscreteScheduler
from .scheduling_pndm_flax import FlaxPNDMScheduler
from .scheduling_sde_ve_flax import FlaxScoreSdeVeScheduler
from .scheduling_utils_flax import (
FlaxKarrasDiffusionSchedulers,
FlaxSchedulerMixin,
FlaxSchedulerOutput,
broadcast_to_shape_from_left,
)
try:
if not (is_torch_available() and is_scipy_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_torch_and_scipy_objects import * # noqa F403
else:
from .scheduling_lms_discrete import LMSDiscreteScheduler
try:
if not (is_torch_available() and is_torchsde_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_torch_and_torchsde_objects import * # noqa F403
else:
from .scheduling_dpmsolver_sde import DPMSolverSDEScheduler
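# Standalone illustration (added, not part of the module) of the optional-dependency
# guard idiom used throughout this file: probe the backend, then import for real or
# degrade gracefully; the helper name is invented.
def _example_guarded_import():
    try:
        if not is_torchsde_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        return None  # callers would receive dummy objects instead, as above
    from .scheduling_dpmsolver_sde import DPMSolverSDEScheduler

    return DPMSolverSDEScheduler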
| 22 | 0 |
'''simple docstring'''
import torch

from diffusers import UnCLIPScheduler

from .test_schedulers import SchedulerCommonTest


class UnCLIPSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (UnCLIPScheduler,)

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "variance_type": "fixed_small_log",
            "clip_sample": True,
            "clip_sample_range": 1.0,
            "prediction_type": "epsilon",
        }

        config.update(**kwargs)
        return config

    def test_timesteps(self):
        for timesteps in [1, 5, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_variance_type(self):
        for variance in ["fixed_small_log", "learned_range"]:
            self.check_over_configs(variance_type=variance)

    def test_clip_sample(self):
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample)

    def test_clip_sample_range(self):
        for clip_sample_range in [1, 5, 10, 20]:
            self.check_over_configs(clip_sample_range=clip_sample_range)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "sample"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_time_indices(self):
        for time_step in [0, 500, 999]:
            for prev_timestep in [None, 5, 100, 250, 500, 750]:
                if prev_timestep is not None and prev_timestep >= time_step:
                    continue

                self.check_over_forward(time_step=time_step, prev_timestep=prev_timestep)

    def test_variance_fixed_small_log(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(variance_type="fixed_small_log")
        scheduler = scheduler_class(**scheduler_config)

        assert torch.sum(torch.abs(scheduler._get_variance(0) - 1.0000e-10)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(487) - 0.0549625)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(999) - 0.9994987)) < 1e-5

    def test_variance_learned_range(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(variance_type="learned_range")
        scheduler = scheduler_class(**scheduler_config)

        predicted_variance = 0.5

        assert scheduler._get_variance(1, predicted_variance=predicted_variance) - -10.1712790 < 1e-5
        assert scheduler._get_variance(487, predicted_variance=predicted_variance) - -5.7998052 < 1e-5
        assert scheduler._get_variance(999, predicted_variance=predicted_variance) - -0.0010011 < 1e-5

    def test_full_loop(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = scheduler.timesteps

        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)

        for i, t in enumerate(timesteps):
            # 1. predict noise residual
            residual = model(sample, t)

            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample

            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 252.2682495) < 1e-2
        assert abs(result_mean.item() - 0.3284743) < 1e-3

    def test_full_loop_skip_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(25)

        timesteps = scheduler.timesteps

        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)

        for i, t in enumerate(timesteps):
            # 1. predict noise residual
            residual = model(sample, t)

            if i + 1 == timesteps.shape[0]:
                prev_timestep = None
            else:
                prev_timestep = timesteps[i + 1]

            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(
                residual, t, sample, prev_timestep=prev_timestep, generator=generator
            ).prev_sample

            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 258.2044983) < 1e-2
        assert abs(result_mean.item() - 0.3362038) < 1e-3

    def test_trained_betas(self):
        pass

    def test_add_noise_device(self):
        pass
| 56 |
'''simple docstring'''


def get_set_bits_count(number: int) -> int:
    '''simple docstring'''
    if not isinstance(number, int) or number < 0:
        raise ValueError("Input must be a non-negative integer")

    count = 0
    while number:
        # `number &= number - 1` clears the lowest set bit, so we jump straight to the
        # next 1 instead of checking every bit position; the loop runs once per set
        # bit rather than 32 times.
        number &= number - 1
        count += 1
    return count


if __name__ == "__main__":
    import doctest

    doctest.testmod()
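    # Added demonstration of Kernighan's trick: since n & (n - 1) drops the lowest
    # set bit, the loop body executes exactly once per 1-bit.
    assert get_set_bits_count(0b10110) == 3
    assert get_set_bits_count(255) == 8
    assert get_set_bits_count(0) == 0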
| 22 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
A : Optional[int] = {
"configuration_altclip": [
"ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
"AltCLIPConfig",
"AltCLIPTextConfig",
"AltCLIPVisionConfig",
],
"processing_altclip": ["AltCLIPProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A : Tuple = [
"ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
"AltCLIPPreTrainedModel",
"AltCLIPModel",
"AltCLIPTextModel",
"AltCLIPVisionModel",
]
if TYPE_CHECKING:
from .configuration_altclip import (
ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
AltCLIPConfig,
AltCLIPTextConfig,
AltCLIPVisionConfig,
)
from .processing_altclip import AltCLIPProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_altclip import (
ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
AltCLIPModel,
AltCLIPPreTrainedModel,
AltCLIPTextModel,
AltCLIPVisionModel,
)
else:
import sys
A : int = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 57 |
'''simple docstring'''
from __future__ import annotations

from collections.abc import Callable
from typing import Any, Generic, TypeVar

T = TypeVar("T")


class SegmentTree(Generic[T]):
    def __init__(self, arr: list[T], fnc: Callable[[T, T], T]) -> None:
        # Iterative segment tree: leaves occupy st[N:2N], internal nodes st[1:N].
        any_type: Any | T = None

        self.N: int = len(arr)
        self.st: list[T] = [any_type for _ in range(self.N)] + arr
        self.fn = fnc
        self.build()

    def build(self) -> None:
        for p in range(self.N - 1, 0, -1):
            self.st[p] = self.fn(self.st[p * 2], self.st[p * 2 + 1])

    def update(self, p: int, v: T) -> None:
        p += self.N
        self.st[p] = v

        while p > 1:
            p = p // 2
            self.st[p] = self.fn(self.st[p * 2], self.st[p * 2 + 1])

    def query(self, l: int, r: int) -> T | None:  # noqa: E741
        l, r = l + self.N, r + self.N

        res: T | None = None
        while l <= r:
            if l % 2 == 1:
                res = self.st[l] if res is None else self.fn(res, self.st[l])
            if r % 2 == 0:
                res = self.st[r] if res is None else self.fn(res, self.st[r])
            l, r = (l + 1) // 2, (r - 1) // 2
        return res


if __name__ == "__main__":
    from functools import reduce

    test_array = [1, 10, -2, 9, -3, 8, 4, -7, 5, 6, 11, -12]

    test_updates = {
        0: 7,
        1: 2,
        2: 6,
        3: -14,
        4: 5,
        5: 4,
        6: 7,
        7: -10,
        8: 9,
        9: 10,
        10: 12,
        11: 1,
    }

    min_segment_tree = SegmentTree(test_array, min)
    max_segment_tree = SegmentTree(test_array, max)
    sum_segment_tree = SegmentTree(test_array, lambda a, b: a + b)

    def test_all_segments() -> None:
        '''simple docstring'''
        for i in range(len(test_array)):
            for j in range(i, len(test_array)):
                min_range = reduce(min, test_array[i : j + 1])
                max_range = reduce(max, test_array[i : j + 1])
                sum_range = reduce(lambda a, b: a + b, test_array[i : j + 1])
                assert min_range == min_segment_tree.query(i, j)
                assert max_range == max_segment_tree.query(i, j)
                assert sum_range == sum_segment_tree.query(i, j)

    test_all_segments()

    for index, value in test_updates.items():
        test_array[index] = value
        min_segment_tree.update(index, value)
        max_segment_tree.update(index, value)
        sum_segment_tree.update(index, value)
    test_all_segments()
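    # Small worked example (added for illustration; values are invented): leaves sit
    # at st[N:2N] and node p combines its children 2p and 2p+1, so queries walk
    # bottom-up in O(log n).
    demo_tree = SegmentTree([3, 1, 4, 1], min)
    assert demo_tree.query(0, 3) == 1
    demo_tree.update(1, 5)
    assert demo_tree.query(0, 1) == 3  # min over [3, 5]
    assert demo_tree.query(0, 3) == 1  # the 1 at index 3 still dominates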
| 22 | 0 |
'''simple docstring'''
from collections import OrderedDict
from ...utils import logging
from .auto_factory import _BaseAutoModelClass, _LazyAutoMapping, auto_class_update
from .configuration_auto import CONFIG_MAPPING_NAMES
logger = logging.get_logger(__name__)

FLAX_MODEL_MAPPING_NAMES = OrderedDict(
[
# Base model mapping
("""albert""", """FlaxAlbertModel"""),
("""bart""", """FlaxBartModel"""),
("""beit""", """FlaxBeitModel"""),
("""bert""", """FlaxBertModel"""),
("""big_bird""", """FlaxBigBirdModel"""),
("""blenderbot""", """FlaxBlenderbotModel"""),
("""blenderbot-small""", """FlaxBlenderbotSmallModel"""),
("""clip""", """FlaxCLIPModel"""),
("""distilbert""", """FlaxDistilBertModel"""),
("""electra""", """FlaxElectraModel"""),
("""gpt-sw3""", """FlaxGPT2Model"""),
("""gpt2""", """FlaxGPT2Model"""),
("""gpt_neo""", """FlaxGPTNeoModel"""),
("""gptj""", """FlaxGPTJModel"""),
("""longt5""", """FlaxLongT5Model"""),
("""marian""", """FlaxMarianModel"""),
("""mbart""", """FlaxMBartModel"""),
("""mt5""", """FlaxMT5Model"""),
("""opt""", """FlaxOPTModel"""),
("""pegasus""", """FlaxPegasusModel"""),
("""regnet""", """FlaxRegNetModel"""),
("""resnet""", """FlaxResNetModel"""),
("""roberta""", """FlaxRobertaModel"""),
("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormModel"""),
("""roformer""", """FlaxRoFormerModel"""),
("""t5""", """FlaxT5Model"""),
("""vision-text-dual-encoder""", """FlaxVisionTextDualEncoderModel"""),
("""vit""", """FlaxViTModel"""),
("""wav2vec2""", """FlaxWav2Vec2Model"""),
("""whisper""", """FlaxWhisperModel"""),
("""xglm""", """FlaxXGLMModel"""),
("""xlm-roberta""", """FlaxXLMRobertaModel"""),
]
)
FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES = OrderedDict(
[
# Model for pre-training mapping
("""albert""", """FlaxAlbertForPreTraining"""),
("""bart""", """FlaxBartForConditionalGeneration"""),
("""bert""", """FlaxBertForPreTraining"""),
("""big_bird""", """FlaxBigBirdForPreTraining"""),
("""electra""", """FlaxElectraForPreTraining"""),
("""longt5""", """FlaxLongT5ForConditionalGeneration"""),
("""mbart""", """FlaxMBartForConditionalGeneration"""),
("""mt5""", """FlaxMT5ForConditionalGeneration"""),
("""roberta""", """FlaxRobertaForMaskedLM"""),
("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForMaskedLM"""),
("""roformer""", """FlaxRoFormerForMaskedLM"""),
("""t5""", """FlaxT5ForConditionalGeneration"""),
("""wav2vec2""", """FlaxWav2Vec2ForPreTraining"""),
("""whisper""", """FlaxWhisperForConditionalGeneration"""),
("""xlm-roberta""", """FlaxXLMRobertaForMaskedLM"""),
]
)
FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Masked LM mapping
("""albert""", """FlaxAlbertForMaskedLM"""),
("""bart""", """FlaxBartForConditionalGeneration"""),
("""bert""", """FlaxBertForMaskedLM"""),
("""big_bird""", """FlaxBigBirdForMaskedLM"""),
("""distilbert""", """FlaxDistilBertForMaskedLM"""),
("""electra""", """FlaxElectraForMaskedLM"""),
("""mbart""", """FlaxMBartForConditionalGeneration"""),
("""roberta""", """FlaxRobertaForMaskedLM"""),
("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForMaskedLM"""),
("""roformer""", """FlaxRoFormerForMaskedLM"""),
("""xlm-roberta""", """FlaxXLMRobertaForMaskedLM"""),
]
)
FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Seq2Seq Causal LM mapping
("""bart""", """FlaxBartForConditionalGeneration"""),
("""blenderbot""", """FlaxBlenderbotForConditionalGeneration"""),
("""blenderbot-small""", """FlaxBlenderbotSmallForConditionalGeneration"""),
("""encoder-decoder""", """FlaxEncoderDecoderModel"""),
("""longt5""", """FlaxLongT5ForConditionalGeneration"""),
("""marian""", """FlaxMarianMTModel"""),
("""mbart""", """FlaxMBartForConditionalGeneration"""),
("""mt5""", """FlaxMT5ForConditionalGeneration"""),
("""pegasus""", """FlaxPegasusForConditionalGeneration"""),
("""t5""", """FlaxT5ForConditionalGeneration"""),
]
)
FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
# Model for Image-classsification
("""beit""", """FlaxBeitForImageClassification"""),
("""regnet""", """FlaxRegNetForImageClassification"""),
("""resnet""", """FlaxResNetForImageClassification"""),
("""vit""", """FlaxViTForImageClassification"""),
]
)
FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES = OrderedDict(
[
("""vision-encoder-decoder""", """FlaxVisionEncoderDecoderModel"""),
]
)
FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Causal LM mapping
("""bart""", """FlaxBartForCausalLM"""),
("""bert""", """FlaxBertForCausalLM"""),
("""big_bird""", """FlaxBigBirdForCausalLM"""),
("""electra""", """FlaxElectraForCausalLM"""),
("""gpt-sw3""", """FlaxGPT2LMHeadModel"""),
("""gpt2""", """FlaxGPT2LMHeadModel"""),
("""gpt_neo""", """FlaxGPTNeoForCausalLM"""),
("""gptj""", """FlaxGPTJForCausalLM"""),
("""opt""", """FlaxOPTForCausalLM"""),
("""roberta""", """FlaxRobertaForCausalLM"""),
("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForCausalLM"""),
("""xglm""", """FlaxXGLMForCausalLM"""),
("""xlm-roberta""", """FlaxXLMRobertaForCausalLM"""),
]
)
FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
# Model for Sequence Classification mapping
("""albert""", """FlaxAlbertForSequenceClassification"""),
("""bart""", """FlaxBartForSequenceClassification"""),
("""bert""", """FlaxBertForSequenceClassification"""),
("""big_bird""", """FlaxBigBirdForSequenceClassification"""),
("""distilbert""", """FlaxDistilBertForSequenceClassification"""),
("""electra""", """FlaxElectraForSequenceClassification"""),
("""mbart""", """FlaxMBartForSequenceClassification"""),
("""roberta""", """FlaxRobertaForSequenceClassification"""),
("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForSequenceClassification"""),
("""roformer""", """FlaxRoFormerForSequenceClassification"""),
("""xlm-roberta""", """FlaxXLMRobertaForSequenceClassification"""),
]
)
FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES = OrderedDict(
[
# Model for Question Answering mapping
("""albert""", """FlaxAlbertForQuestionAnswering"""),
("""bart""", """FlaxBartForQuestionAnswering"""),
("""bert""", """FlaxBertForQuestionAnswering"""),
("""big_bird""", """FlaxBigBirdForQuestionAnswering"""),
("""distilbert""", """FlaxDistilBertForQuestionAnswering"""),
("""electra""", """FlaxElectraForQuestionAnswering"""),
("""mbart""", """FlaxMBartForQuestionAnswering"""),
("""roberta""", """FlaxRobertaForQuestionAnswering"""),
("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForQuestionAnswering"""),
("""roformer""", """FlaxRoFormerForQuestionAnswering"""),
("""xlm-roberta""", """FlaxXLMRobertaForQuestionAnswering"""),
]
)
FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
# Model for Token Classification mapping
("""albert""", """FlaxAlbertForTokenClassification"""),
("""bert""", """FlaxBertForTokenClassification"""),
("""big_bird""", """FlaxBigBirdForTokenClassification"""),
("""distilbert""", """FlaxDistilBertForTokenClassification"""),
("""electra""", """FlaxElectraForTokenClassification"""),
("""roberta""", """FlaxRobertaForTokenClassification"""),
("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForTokenClassification"""),
("""roformer""", """FlaxRoFormerForTokenClassification"""),
("""xlm-roberta""", """FlaxXLMRobertaForTokenClassification"""),
]
)
FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES = OrderedDict(
[
# Model for Multiple Choice mapping
("""albert""", """FlaxAlbertForMultipleChoice"""),
("""bert""", """FlaxBertForMultipleChoice"""),
("""big_bird""", """FlaxBigBirdForMultipleChoice"""),
("""distilbert""", """FlaxDistilBertForMultipleChoice"""),
("""electra""", """FlaxElectraForMultipleChoice"""),
("""roberta""", """FlaxRobertaForMultipleChoice"""),
("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForMultipleChoice"""),
("""roformer""", """FlaxRoFormerForMultipleChoice"""),
("""xlm-roberta""", """FlaxXLMRobertaForMultipleChoice"""),
]
)
FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES = OrderedDict(
[
("""bert""", """FlaxBertForNextSentencePrediction"""),
]
)
FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES = OrderedDict(
[
("""speech-encoder-decoder""", """FlaxSpeechEncoderDecoderModel"""),
("""whisper""", """FlaxWhisperForConditionalGeneration"""),
]
)
FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
("""whisper""", """FlaxWhisperForAudioClassification"""),
]
)
FLAX_MODEL_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_MAPPING_NAMES)
FLAX_MODEL_FOR_PRETRAINING_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES)
FLAX_MODEL_FOR_MASKED_LM_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES)
FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES
)
FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES)
FLAX_MODEL_FOR_CAUSAL_LM_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES)
FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES
)
FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES
)
FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES
)
FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES
)
FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES
)


class FlaxAutoModel(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_MAPPING


FlaxAutoModel = auto_class_update(FlaxAutoModel)


class FlaxAutoModelForPreTraining(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_PRETRAINING_MAPPING


FlaxAutoModelForPreTraining = auto_class_update(FlaxAutoModelForPreTraining, head_doc="pretraining")


class FlaxAutoModelForCausalLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_CAUSAL_LM_MAPPING


FlaxAutoModelForCausalLM = auto_class_update(FlaxAutoModelForCausalLM, head_doc="causal language modeling")


class FlaxAutoModelForMaskedLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_MASKED_LM_MAPPING


FlaxAutoModelForMaskedLM = auto_class_update(FlaxAutoModelForMaskedLM, head_doc="masked language modeling")


class FlaxAutoModelForSeq2SeqLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING


FlaxAutoModelForSeq2SeqLM = auto_class_update(
    FlaxAutoModelForSeq2SeqLM, head_doc="sequence-to-sequence language modeling", checkpoint_for_example="t5-base"
)


class FlaxAutoModelForSequenceClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING


FlaxAutoModelForSequenceClassification = auto_class_update(
    FlaxAutoModelForSequenceClassification, head_doc="sequence classification"
)


class FlaxAutoModelForQuestionAnswering(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING


FlaxAutoModelForQuestionAnswering = auto_class_update(FlaxAutoModelForQuestionAnswering, head_doc="question answering")


class FlaxAutoModelForTokenClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING


FlaxAutoModelForTokenClassification = auto_class_update(
    FlaxAutoModelForTokenClassification, head_doc="token classification"
)


class FlaxAutoModelForMultipleChoice(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING


FlaxAutoModelForMultipleChoice = auto_class_update(FlaxAutoModelForMultipleChoice, head_doc="multiple choice")


class FlaxAutoModelForNextSentencePrediction(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING


FlaxAutoModelForNextSentencePrediction = auto_class_update(
    FlaxAutoModelForNextSentencePrediction, head_doc="next sentence prediction"
)


class FlaxAutoModelForImageClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING


FlaxAutoModelForImageClassification = auto_class_update(
    FlaxAutoModelForImageClassification, head_doc="image classification"
)


class FlaxAutoModelForVision2Seq(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING


FlaxAutoModelForVision2Seq = auto_class_update(FlaxAutoModelForVision2Seq, head_doc="vision-to-text modeling")


class FlaxAutoModelForSpeechSeq2Seq(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING


FlaxAutoModelForSpeechSeq2Seq = auto_class_update(
    FlaxAutoModelForSpeechSeq2Seq, head_doc="sequence-to-sequence speech-to-text modeling"
)
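# Hedged usage sketch (added, not part of the module): each auto class dispatches on
# the checkpoint's config type, so one call site works across architectures. The
# checkpoint names below are illustrative.
#
#   from transformers import FlaxAutoModel
#   model = FlaxAutoModel.from_pretrained("bert-base-cased")  # -> FlaxBertModel
#   model = FlaxAutoModel.from_pretrained("roberta-base")     # -> FlaxRobertaModel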
| 58 |
'''simple docstring'''
import pytest

from datasets.utils.sharding import _distribute_shards, _number_of_shards_in_gen_kwargs, _split_gen_kwargs


@pytest.mark.parametrize(
    "kwargs, expected",
    [
        ({"num_shards": 0, "max_num_jobs": 1}, []),
        ({"num_shards": 10, "max_num_jobs": 1}, [range(10)]),
        ({"num_shards": 10, "max_num_jobs": 10}, [range(i, i + 1) for i in range(10)]),
        ({"num_shards": 1, "max_num_jobs": 10}, [range(1)]),
        ({"num_shards": 10, "max_num_jobs": 3}, [range(0, 4), range(4, 7), range(7, 10)]),
        ({"num_shards": 3, "max_num_jobs": 10}, [range(0, 1), range(1, 2), range(2, 3)]),
    ],
)
def test_distribute_shards(kwargs, expected):
    out = _distribute_shards(**kwargs)
    assert out == expected


@pytest.mark.parametrize(
    "gen_kwargs, max_num_jobs, expected",
    [
        ({"foo": 0}, 10, [{"foo": 0}]),
        ({"shards": [0, 1, 2, 3]}, 1, [{"shards": [0, 1, 2, 3]}]),
        ({"shards": [0, 1, 2, 3]}, 4, [{"shards": [0]}, {"shards": [1]}, {"shards": [2]}, {"shards": [3]}]),
        ({"shards": [0, 1]}, 4, [{"shards": [0]}, {"shards": [1]}]),
        ({"shards": [0, 1, 2, 3]}, 2, [{"shards": [0, 1]}, {"shards": [2, 3]}]),
    ],
)
def test_split_gen_kwargs(gen_kwargs, max_num_jobs, expected):
    out = _split_gen_kwargs(gen_kwargs, max_num_jobs)
    assert out == expected


@pytest.mark.parametrize(
    "gen_kwargs, expected",
    [
        ({"foo": 0}, 1),
        ({"shards": [0]}, 1),
        ({"shards": [0, 1, 2, 3]}, 4),
        ({"shards": [0, 1, 2, 3], "foo": 0}, 4),
        ({"shards": [0, 1, 2, 3], "other": (0, 1)}, 4),
        ({"shards": [0, 1, 2, 3], "shards2": [0, 1]}, RuntimeError),
    ],
)
def test_number_of_shards_in_gen_kwargs(gen_kwargs, expected):
    if expected is RuntimeError:
        with pytest.raises(expected):
            _number_of_shards_in_gen_kwargs(gen_kwargs)
    else:
        out = _number_of_shards_in_gen_kwargs(gen_kwargs)
        assert out == expected
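# Reference sketch (added) of the contiguous split the first test exercises: near-equal
# ranges, never more jobs than shards. This mirrors the expected behaviour but is not
# the library implementation.
def _example_distribute(num_shards, max_num_jobs):
    num_jobs = min(num_shards, max_num_jobs)
    out, start = [], 0
    for rank in range(num_jobs):
        # earlier jobs absorb the remainder, one extra shard each
        size = num_shards // num_jobs + int(rank < num_shards % num_jobs)
        out.append(range(start, start + size))
        start += size
    return out


def test_example_distribute_matches_expectations():
    assert _example_distribute(10, 3) == [range(0, 4), range(4, 7), range(7, 10)]
    assert _example_distribute(0, 1) == []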
| 22 | 0 |
import math

from numpy import inf
from scipy.integrate import quad


def gamma(num: float) -> float:
    # Γ(num) computed as the improper integral of x^(num-1) * e^(-x) over [0, ∞)
    if num <= 0:
        raise ValueError("math domain error")
    return quad(integrand, 0, inf, args=(num,))[0]


def integrand(x: float, z: float) -> float:
    return math.pow(x, z - 1) * math.exp(-x)


if __name__ == "__main__":
    from doctest import testmod

    testmod()
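    # Added numeric sanity check: Γ(n) = (n - 1)! for positive integers, so Γ(5) ≈ 24.
    assert abs(gamma(5) - 24.0) < 1e-4
    print(f"gamma(5) = {gamma(5):.6f}  # expected 24, i.e. 4!")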
| 59 |
'''simple docstring'''
import math


def perfect_square(num: int) -> bool:
    '''simple docstring'''
    return math.sqrt(num) * math.sqrt(num) == num


def perfect_square_binary_search(n: int) -> bool:
    '''simple docstring'''
    left = 0
    right = n
    while left <= right:
        mid = (left + right) // 2
        if mid**2 == n:
            return True
        elif mid**2 > n:
            right = mid - 1
        else:
            left = mid + 1
    return False


if __name__ == "__main__":
    import doctest

    doctest.testmod()
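    # Added cross-check: the binary-search version agrees with exact integer sqrt,
    # including large inputs where the float-based check can lose precision.
    for value in (0, 1, 16, 17, 10**12):
        assert perfect_square_binary_search(value) == (math.isqrt(value) ** 2 == value)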
| 22 | 0 |
"""simple docstring"""
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
snake_case__ : Dict = logging.get_logger(__name__)
snake_case__ : List[str] = {
'''BridgeTower/bridgetower-base''': '''https://huggingface.co/BridgeTower/bridgetower-base/blob/main/config.json''',
'''BridgeTower/bridgetower-base-itm-mlm''': (
'''https://huggingface.co/BridgeTower/bridgetower-base-itm-mlm/blob/main/config.json'''
),
}
class snake_case_( a__ ):
__UpperCamelCase = '''bridgetower_vision_model'''
def __init__( self : Tuple , UpperCamelCase_ : List[str]=7_6_8 , UpperCamelCase_ : Optional[Any]=1_2 , UpperCamelCase_ : List[Any]=3 , UpperCamelCase_ : Any=1_6 , UpperCamelCase_ : Dict=2_8_8 , UpperCamelCase_ : Tuple=1 , UpperCamelCase_ : Any=1E-05 , UpperCamelCase_ : str=False , UpperCamelCase_ : int=True , UpperCamelCase_ : int=False , **UpperCamelCase_ : Optional[int] , ):
super().__init__(**UpperCamelCase_ )
lowerCAmelCase : Any = hidden_size
lowerCAmelCase : Any = num_hidden_layers
lowerCAmelCase : Union[str, Any] = num_channels
lowerCAmelCase : Tuple = patch_size
lowerCAmelCase : Any = image_size
lowerCAmelCase : List[str] = initializer_factor
lowerCAmelCase : List[Any] = layer_norm_eps
lowerCAmelCase : List[str] = stop_gradient
lowerCAmelCase : str = share_layernorm
lowerCAmelCase : Tuple = remove_last_layer
@classmethod
def lowerCamelCase__ ( cls : List[str] , UpperCamelCase_ : Union[str, os.PathLike] , **UpperCamelCase_ : Optional[Any] ):
lowerCAmelCase, lowerCAmelCase : Tuple = cls.get_config_dict(UpperCamelCase_ , **UpperCamelCase_ )
if config_dict.get('''model_type''' ) == "bridgetower":
lowerCAmelCase : Union[str, Any] = config_dict['''text_config''']
if "model_type" in config_dict and hasattr(cls , '''model_type''' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type '''
F'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
return cls.from_dict(UpperCamelCase_ , **UpperCamelCase_ )
class snake_case_( a__ ):
__UpperCamelCase = '''bridgetower_text_model'''
def __init__( self : List[str] , UpperCamelCase_ : int=5_0_2_6_5 , UpperCamelCase_ : int=7_6_8 , UpperCamelCase_ : Union[str, Any]=1_2 , UpperCamelCase_ : str=1_2 , UpperCamelCase_ : Any=1 , UpperCamelCase_ : str=3_0_7_2 , UpperCamelCase_ : List[Any]="gelu" , UpperCamelCase_ : int=0.1 , UpperCamelCase_ : Dict=0.1 , UpperCamelCase_ : Dict=5_1_4 , UpperCamelCase_ : Optional[Any]=1 , UpperCamelCase_ : Tuple=1E-05 , UpperCamelCase_ : Any=1 , UpperCamelCase_ : List[Any]=0 , UpperCamelCase_ : Optional[int]=2 , UpperCamelCase_ : Optional[int]="absolute" , UpperCamelCase_ : List[Any]=True , **UpperCamelCase_ : int , ):
super().__init__(**UpperCamelCase_ )
lowerCAmelCase : List[Any] = vocab_size
lowerCAmelCase : Tuple = hidden_size
lowerCAmelCase : Optional[int] = num_hidden_layers
lowerCAmelCase : List[str] = num_attention_heads
lowerCAmelCase : Any = hidden_act
lowerCAmelCase : Optional[int] = initializer_factor
lowerCAmelCase : List[Any] = intermediate_size
lowerCAmelCase : int = hidden_dropout_prob
lowerCAmelCase : List[str] = attention_probs_dropout_prob
lowerCAmelCase : Optional[Any] = max_position_embeddings
lowerCAmelCase : Dict = type_vocab_size
lowerCAmelCase : List[str] = layer_norm_eps
lowerCAmelCase : int = position_embedding_type
lowerCAmelCase : Optional[int] = use_cache
lowerCAmelCase : Union[str, Any] = pad_token_id
lowerCAmelCase : int = bos_token_id
lowerCAmelCase : List[Any] = eos_token_id
@classmethod
def lowerCamelCase__ ( cls : Any , UpperCamelCase_ : Union[str, os.PathLike] , **UpperCamelCase_ : Optional[Any] ):
lowerCAmelCase, lowerCAmelCase : str = cls.get_config_dict(UpperCamelCase_ , **UpperCamelCase_ )
if config_dict.get('''model_type''' ) == "bridgetower":
lowerCAmelCase : List[Any] = config_dict['''text_config''']
if "model_type" in config_dict and hasattr(cls , '''model_type''' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type '''
F'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
return cls.from_dict(UpperCamelCase_ , **UpperCamelCase_ )
class snake_case_( a__ ):
__UpperCamelCase = '''bridgetower'''
def __init__( self : Union[str, Any] , UpperCamelCase_ : Tuple=True , UpperCamelCase_ : str="gelu" , UpperCamelCase_ : List[str]=7_6_8 , UpperCamelCase_ : int=1 , UpperCamelCase_ : Union[str, Any]=1E-05 , UpperCamelCase_ : Any=False , UpperCamelCase_ : Dict="add" , UpperCamelCase_ : str=1_2 , UpperCamelCase_ : Optional[Any]=6 , UpperCamelCase_ : Any=False , UpperCamelCase_ : Dict=False , UpperCamelCase_ : Optional[Any]=None , UpperCamelCase_ : Optional[int]=None , **UpperCamelCase_ : Optional[int] , ):
# TODO: remove this once the Hub files are updated.
lowerCAmelCase : str = kwargs.pop('''text_config_dict''' , UpperCamelCase_ )
lowerCAmelCase : Tuple = kwargs.pop('''vision_config_dict''' , UpperCamelCase_ )
super().__init__(**UpperCamelCase_ )
lowerCAmelCase : Any = share_cross_modal_transformer_layers
lowerCAmelCase : Dict = hidden_act
lowerCAmelCase : List[Any] = hidden_size
lowerCAmelCase : List[Any] = initializer_factor
lowerCAmelCase : Optional[int] = layer_norm_eps
lowerCAmelCase : Union[str, Any] = share_link_tower_layers
lowerCAmelCase : Any = link_tower_type
lowerCAmelCase : Optional[int] = num_attention_heads
lowerCAmelCase : List[str] = num_hidden_layers
lowerCAmelCase : Optional[int] = tie_word_embeddings
lowerCAmelCase : Tuple = init_layernorm_from_vision_encoder
if text_config is None:
lowerCAmelCase : Optional[Any] = {}
logger.info('''`text_config` is `None`. Initializing the `BridgeTowerTextConfig` with default values.''' )
if vision_config is None:
lowerCAmelCase : int = {}
logger.info('''`vision_config` is `None`. Initializing the `BridgeTowerVisionConfig` with default values.''' )
lowerCAmelCase : Dict = BridgeTowerTextConfig(**UpperCamelCase_ )
lowerCAmelCase : int = BridgeTowerVisionConfig(**UpperCamelCase_ )
@classmethod
def lowerCamelCase__ ( cls : List[Any] , UpperCamelCase_ : BridgeTowerTextConfig , UpperCamelCase_ : BridgeTowerVisionConfig , **UpperCamelCase_ : Optional[Any] ):
return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **UpperCamelCase_ )
def lowerCamelCase__ ( self : int ):
lowerCAmelCase : Union[str, Any] = copy.deepcopy(self.__dict__ )
lowerCAmelCase : Union[str, Any] = self.text_config.to_dict()
lowerCAmelCase : Optional[Any] = self.vision_config.to_dict()
lowerCAmelCase : Optional[int] = self.__class__.model_type
return output
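# Hedged usage sketch (added, not part of the file): compose the composite config from
# its sub-configs; shown in comment form so the module itself stays side-effect free.
#
#   text_config = BridgeTowerTextConfig()
#   vision_config = BridgeTowerVisionConfig()
#   config = BridgeTowerConfig.from_text_vision_configs(text_config, vision_config)
#   assert config.to_dict()["model_type"] == "bridgetower"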
| 60 |
'''simple docstring'''
import inspect
import tempfile
import unittest

from huggingface_hub import hf_hub_download

from transformers import is_torch_available
from transformers.testing_utils import is_flaky, require_torch, slow, torch_device

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


TOLERANCE = 1e-4

if is_torch_available():
    import torch

    from transformers import AutoformerConfig, AutoformerForPrediction, AutoformerModel
    from transformers.models.autoformer.modeling_autoformer import AutoformerDecoder, AutoformerEncoder


@require_torch
class AutoformerModelTester:
    def __init__(
        self,
        parent,
        d_model=16,
        batch_size=13,
        prediction_length=7,
        context_length=14,
        label_length=10,
        cardinality=19,
        embedding_dimension=5,
        num_time_features=4,
        is_training=True,
        hidden_size=16,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=4,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        lags_sequence=[1, 2, 3, 4, 5],
        moving_average=25,
        autocorrelation_factor=5,
    ):
        self.d_model = d_model
        self.parent = parent
        self.batch_size = batch_size
        self.prediction_length = prediction_length
        self.context_length = context_length
        self.cardinality = cardinality
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence
        self.embedding_dimension = embedding_dimension
        self.is_training = is_training
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob

        self.encoder_seq_length = context_length
        self.decoder_seq_length = prediction_length + label_length
        self.label_length = label_length

        self.moving_average = moving_average
        self.autocorrelation_factor = autocorrelation_factor

    def get_config(self):
        return AutoformerConfig(
            d_model=self.d_model,
            encoder_layers=self.num_hidden_layers,
            decoder_layers=self.num_hidden_layers,
            encoder_attention_heads=self.num_attention_heads,
            decoder_attention_heads=self.num_attention_heads,
            encoder_ffn_dim=self.intermediate_size,
            decoder_ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            prediction_length=self.prediction_length,
            context_length=self.context_length,
            label_length=self.label_length,
            lags_sequence=self.lags_sequence,
            num_time_features=self.num_time_features,
            num_static_categorical_features=1,
            cardinality=[self.cardinality],
            embedding_dimension=[self.embedding_dimension],
            moving_average=self.moving_average,
        )

    def prepare_autoformer_inputs_dict(self, config):
        _past_length = config.context_length + max(config.lags_sequence)

        static_categorical_features = ids_tensor([self.batch_size, 1], config.cardinality[0])
        past_time_features = floats_tensor([self.batch_size, _past_length, config.num_time_features])
        past_values = floats_tensor([self.batch_size, _past_length])
        past_observed_mask = floats_tensor([self.batch_size, _past_length]) > 0.5

        # decoder inputs
        future_time_features = floats_tensor([self.batch_size, config.prediction_length, config.num_time_features])
        future_values = floats_tensor([self.batch_size, config.prediction_length])

        inputs_dict = {
            "past_values": past_values,
            "static_categorical_features": static_categorical_features,
            "past_time_features": past_time_features,
            "past_observed_mask": past_observed_mask,
            "future_time_features": future_time_features,
            "future_values": future_values,
        }
        return inputs_dict

    def prepare_config_and_inputs(self):
        config = self.get_config()
        inputs_dict = self.prepare_autoformer_inputs_dict(config)
        return config, inputs_dict

    def prepare_config_and_inputs_for_common(self):
        config, inputs_dict = self.prepare_config_and_inputs()
        return config, inputs_dict

    def check_encoder_decoder_model_standalone(self, config, inputs_dict):
        model = AutoformerModel(config=config).to(torch_device).eval()
        outputs = model(**inputs_dict)

        encoder_last_hidden_state = outputs.encoder_last_hidden_state
        last_hidden_state = outputs.last_hidden_state

        with tempfile.TemporaryDirectory() as tmpdirname:
            encoder = model.get_encoder()
            encoder.save_pretrained(tmpdirname)
            encoder = AutoformerEncoder.from_pretrained(tmpdirname).to(torch_device)

        transformer_inputs, feature, _, _, _ = model.create_network_inputs(**inputs_dict)
        seasonal_input, trend_input = model.decomposition_layer(transformer_inputs[:, : config.context_length, ...])

        enc_input = torch.cat(
            (transformer_inputs[:, : config.context_length, ...], feature[:, : config.context_length, ...]),
            dim=-1,
        )
        encoder_last_hidden_state_2 = encoder(inputs_embeds=enc_input)[0]
        self.parent.assertTrue((encoder_last_hidden_state_2 - encoder_last_hidden_state).abs().max().item() < 1e-3)

        mean = (
            torch.mean(transformer_inputs[:, : config.context_length, ...], dim=1)
            .unsqueeze(1)
            .repeat(1, config.prediction_length, 1)
        )
        zeros = torch.zeros(
            [transformer_inputs.shape[0], config.prediction_length, transformer_inputs.shape[2]],
            device=enc_input.device,
        )

        dec_input = torch.cat(
            (
                torch.cat((seasonal_input[:, -config.label_length :, ...], zeros), dim=1),
                feature[:, config.context_length - config.label_length :, ...],
            ),
            dim=-1,
        )
        trend_init = torch.cat(
            (
                torch.cat((trend_input[:, -config.label_length :, ...], mean), dim=1),
                feature[:, config.context_length - config.label_length :, ...],
            ),
            dim=-1,
        )

        with tempfile.TemporaryDirectory() as tmpdirname:
            decoder = model.get_decoder()
            decoder.save_pretrained(tmpdirname)
            decoder = AutoformerDecoder.from_pretrained(tmpdirname).to(torch_device)

        last_hidden_state_2 = decoder(
            trend=trend_init,
            inputs_embeds=dec_input,
            encoder_hidden_states=encoder_last_hidden_state,
        )[0]

        self.parent.assertTrue((last_hidden_state_2 - last_hidden_state).abs().max().item() < 1e-3)


@require_torch
class AutoformerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (AutoformerModel, AutoformerForPrediction) if is_torch_available() else ()
    all_generative_model_classes = (AutoformerForPrediction,) if is_torch_available() else ()
    pipeline_model_mapping = {"feature-extraction": AutoformerModel} if is_torch_available() else {}
    test_pruning = False
    test_head_masking = False
    test_missing_keys = False
    test_torchscript = False
    test_inputs_embeds = False
    test_model_common_attributes = False

    def setUp(self):
        self.model_tester = AutoformerModelTester(self)
        self.config_tester = ConfigTester(self, config_class=AutoformerConfig, has_text_modality=False)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_save_load_strict(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            model = model_class(config)

            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname)
                model2, info = model_class.from_pretrained(tmpdirname, output_loading_info=True)
            self.assertEqual(info["missing_keys"], [])

    def test_encoder_decoder_model_standalone(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_encoder_decoder_model_standalone(*config_and_inputs)

    @unittest.skip(reason="Model has no tokens embeddings")
    def test_resize_tokens_embeddings(self):
        pass

    def test_model_main_input_name(self):
        model_signature = inspect.signature(getattr(AutoformerModel, "forward"))
        # The main input is the name of the argument after `self`
        observed_main_input_name = list(model_signature.parameters.keys())[1]
        self.assertEqual(AutoformerModel.main_input_name, observed_main_input_name)

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = [
                "past_values",
                "past_time_features",
                "past_observed_mask",
                "static_categorical_features",
                "static_real_features",
                "future_values",
                "future_time_features",
            ]

            if model.__class__.__name__ in ["AutoformerForPrediction"]:
                expected_arg_names.append("future_observed_mask")

            expected_arg_names.extend(
                [
                    "decoder_attention_mask",
                    "head_mask",
                    "decoder_head_mask",
                    "cross_attn_head_mask",
                    "encoder_outputs",
                    "past_key_values",
                    "output_hidden_states",
                    "output_attentions",
                    "use_cache",
                    "return_dict",
                ]
            )

            self.assertListEqual(arg_names[: len(expected_arg_names)], expected_arg_names)

    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        seq_len = getattr(self.model_tester, "seq_length", None)
        decoder_seq_length = getattr(self.model_tester, "decoder_seq_length", seq_len)
        encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", seq_len)
        d_model = getattr(self.model_tester, "d_model", None)
        num_attention_heads = getattr(self.model_tester, "num_attention_heads", None)
        dim = d_model // num_attention_heads

        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = False
            config.return_dict = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)

            # check that output_attentions also work using config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.encoder_attentions
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)

            self.assertListEqual(
                list(attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, encoder_seq_length, dim],
            )
            out_len = len(outputs)

            correct_outlen = 7

            if "last_hidden_state" in outputs:
                correct_outlen += 1

            if "trend" in outputs:
                correct_outlen += 1

            if "past_key_values" in outputs:
                correct_outlen += 1  # past_key_values have been returned

            if "loss" in outputs:
                correct_outlen += 1

            if "params" in outputs:
                correct_outlen += 1

            self.assertEqual(out_len, correct_outlen)

            # decoder attentions
            decoder_attentions = outputs.decoder_attentions
            self.assertIsInstance(decoder_attentions, (list, tuple))
            self.assertEqual(len(decoder_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(decoder_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, decoder_seq_length, dim],
            )

            # cross attentions
            cross_attentions = outputs.cross_attentions
            self.assertIsInstance(cross_attentions, (list, tuple))
            self.assertEqual(len(cross_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(cross_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, decoder_seq_length, dim],
            )

            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            self.assertEqual(out_len + 2, len(outputs))

            self_attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions

            self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(self_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, encoder_seq_length, dim],
            )

    @is_flaky()
    def test_retain_grad_hidden_states_attentions(self):
        super().test_retain_grad_hidden_states_attentions()


def prepare_batch(filename="train-batch.pt"):
    '''simple docstring'''
    file = hf_hub_download(repo_id="hf-internal-testing/tourism-monthly-batch", filename=filename, repo_type="dataset")
    batch = torch.load(file, map_location=torch_device)
    return batch


@require_torch
@slow
class AutoformerModelIntegrationTests(unittest.TestCase):
    def test_inference_no_head(self):
        model = AutoformerModel.from_pretrained("huggingface/autoformer-tourism-monthly").to(torch_device)
        batch = prepare_batch()

        with torch.no_grad():
            output = model(
                past_values=batch["past_values"],
                past_time_features=batch["past_time_features"],
                past_observed_mask=batch["past_observed_mask"],
                static_categorical_features=batch["static_categorical_features"],
                future_values=batch["future_values"],
                future_time_features=batch["future_time_features"],
            )[0]

        expected_shape = torch.Size(
            (64, model.config.prediction_length + model.config.label_length, model.config.feature_size)
        )
        self.assertEqual(output.shape, expected_shape)

        expected_slice = torch.tensor(
            [[0.3593, -1.3398, 0.6330], [0.2279, 1.5396, -0.1792], [0.0450, 1.3225, -0.2335]], device=torch_device
        )
        self.assertTrue(torch.allclose(output[0, :3, :3], expected_slice, atol=TOLERANCE))

    def test_inference_head(self):
        model = AutoformerForPrediction.from_pretrained("huggingface/autoformer-tourism-monthly").to(torch_device)
        batch = prepare_batch("val-batch.pt")
        with torch.no_grad():
            output = model(
                past_values=batch["past_values"],
                past_time_features=batch["past_time_features"],
                past_observed_mask=batch["past_observed_mask"],
                static_categorical_features=batch["static_categorical_features"],
            ).encoder_last_hidden_state
        expected_shape = torch.Size((64, model.config.context_length, model.config.d_model))
        self.assertEqual(output.shape, expected_shape)

        expected_slice = torch.tensor(
            [[-0.0734, -0.9036, 0.8358], [4.7186, 2.4113, 1.9581], [1.7953, 2.3558, 1.2970]], device=torch_device
        )
        self.assertTrue(torch.allclose(output[0, :3, :3], expected_slice, atol=TOLERANCE))

    def test_seq_to_seq_generation(self):
        model = AutoformerForPrediction.from_pretrained("huggingface/autoformer-tourism-monthly").to(torch_device)
        batch = prepare_batch("val-batch.pt")
        with torch.no_grad():
            outputs = model.generate(
                static_categorical_features=batch["static_categorical_features"],
                past_time_features=batch["past_time_features"],
                past_values=batch["past_values"],
                future_time_features=batch["future_time_features"],
                past_observed_mask=batch["past_observed_mask"],
            )
        expected_shape = torch.Size((64, model.config.num_parallel_samples, model.config.prediction_length))
        self.assertEqual(outputs.sequences.shape, expected_shape)

        expected_slice = torch.tensor([3130.6763, 4056.5293, 7053.0786], device=torch_device)
        mean_prediction = outputs.sequences.mean(dim=1)
        self.assertTrue(torch.allclose(mean_prediction[0, -3:], expected_slice, rtol=1e-1))
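# Minimal sketch (added) of the moving-average series decomposition that Autoformer's
# `decomposition_layer`, exercised in the tests above, is built on. The edge-replication
# padding scheme here is an illustrative assumption, not the library implementation.
def example_decompose(x, kernel_size=25):
    import torch

    # x: (batch, time). Pad by repeating the first/last value so the trend keeps length.
    pad = (kernel_size - 1) // 2
    padded = torch.cat([x[:, :1].repeat(1, pad), x, x[:, -1:].repeat(1, pad)], dim=1)
    trend = padded.unfold(dimension=1, size=kernel_size, step=1).mean(dim=-1)
    return x - trend, trend  # (seasonal, trend)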
| 22 | 0 |
"""simple docstring"""
def __a ( __lowerCamelCase ):
UpperCAmelCase_ : List[str] = int(__lowerCamelCase )
if n_element < 1:
UpperCAmelCase_ : List[Any] = ValueError("a should be a positive number" )
raise my_error
UpperCAmelCase_ : List[Any] = [1]
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Optional[int] = (0, 0, 0)
UpperCAmelCase_ : Dict = 1
while index < n_element:
while hamming_list[i] * 2 <= hamming_list[-1]:
i += 1
while hamming_list[j] * 3 <= hamming_list[-1]:
j += 1
while hamming_list[k] * 5 <= hamming_list[-1]:
k += 1
hamming_list.append(
min(hamming_list[i] * 2, hamming_list[j] * 3, hamming_list[k] * 5 ) )
index += 1
return hamming_list
if __name__ == "__main__":
_a = input('Enter the last number (nth term) of the Hamming Number Series: ')
print('Formula of Hamming Number Series => 2^i * 3^j * 5^k')
_a = hamming(int(n))
print('-----------------------------------------------------')
print(f"""The list with nth numbers is: {hamming_numbers}""")
print('-----------------------------------------------------')
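    # Added check: the first ten Hamming numbers form a known sequence, since every
    # term is the smallest unused product of 2^i * 3^j * 5^k.
    assert hamming(10) == [1, 2, 3, 4, 5, 6, 8, 9, 10, 12]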
| 61 |
'''simple docstring'''
from .integrations import (
    is_optuna_available,
    is_ray_available,
    is_sigopt_available,
    is_wandb_available,
    run_hp_search_optuna,
    run_hp_search_ray,
    run_hp_search_sigopt,
    run_hp_search_wandb,
)
from .trainer_utils import (
    HPSearchBackend,
    default_hp_space_optuna,
    default_hp_space_ray,
    default_hp_space_sigopt,
    default_hp_space_wandb,
)
from .utils import logging


logger = logging.get_logger(__name__)


class HyperParamSearchBackendBase:
    name: str
    pip_package: str = None

    @staticmethod
    def is_available():
        raise NotImplementedError

    def run(self, trainer, n_trials: int, direction: str, **kwargs):
        raise NotImplementedError

    def default_hp_space(self, trial):
        raise NotImplementedError

    def ensure_available(self):
        if not self.is_available():
            raise RuntimeError(
                f"You picked the {self.name} backend, but it is not installed. Run {self.pip_install()}."
            )

    @classmethod
    def pip_install(cls):
        return f"`pip install {cls.pip_package or cls.name}`"


class OptunaBackend(HyperParamSearchBackendBase):
    name = "optuna"

    @staticmethod
    def is_available():
        return is_optuna_available()

    def run(self, trainer, n_trials: int, direction: str, **kwargs):
        return run_hp_search_optuna(trainer, n_trials, direction, **kwargs)

    def default_hp_space(self, trial):
        return default_hp_space_optuna(trial)


class RayTuneBackend(HyperParamSearchBackendBase):
    name = "ray"
    pip_package = "'ray[tune]'"

    @staticmethod
    def is_available():
        return is_ray_available()

    def run(self, trainer, n_trials: int, direction: str, **kwargs):
        return run_hp_search_ray(trainer, n_trials, direction, **kwargs)

    def default_hp_space(self, trial):
        return default_hp_space_ray(trial)


class SigOptBackend(HyperParamSearchBackendBase):
    name = "sigopt"

    @staticmethod
    def is_available():
        return is_sigopt_available()

    def run(self, trainer, n_trials: int, direction: str, **kwargs):
        return run_hp_search_sigopt(trainer, n_trials, direction, **kwargs)

    def default_hp_space(self, trial):
        return default_hp_space_sigopt(trial)


class WandbBackend(HyperParamSearchBackendBase):
    name = "wandb"

    @staticmethod
    def is_available():
        return is_wandb_available()

    def run(self, trainer, n_trials: int, direction: str, **kwargs):
        return run_hp_search_wandb(trainer, n_trials, direction, **kwargs)

    def default_hp_space(self, trial):
        return default_hp_space_wandb(trial)


ALL_HYPERPARAMETER_SEARCH_BACKENDS = {
    HPSearchBackend(backend.name): backend for backend in [OptunaBackend, RayTuneBackend, SigOptBackend, WandbBackend]
}


def default_hp_search_backend() -> str:
    available_backends = [backend for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values() if backend.is_available()]
    if len(available_backends) > 0:
        name = available_backends[0].name
        if len(available_backends) > 1:
            logger.info(
                f"{len(available_backends)} hyperparameter search backends available. Using {name} as the default."
            )
        return name
    raise RuntimeError(
        "No hyperparameter search backend available.\n"
        + "\n".join(
            f" - To install {backend.name} run {backend.pip_install()}"
            for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values()
        )
    )
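# Hedged sketch (added) of how a caller could consult the registry above; `trainer`
# stands in for a transformers Trainer instance and the function name is invented.
def run_with_default_backend(trainer, n_trials: int = 10, direction: str = "minimize"):
    backend = ALL_HYPERPARAMETER_SEARCH_BACKENDS[HPSearchBackend(default_hp_search_backend())]()
    backend.ensure_available()
    return backend.run(trainer, n_trials, direction)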
| 22 | 0 |
import warnings
from transformers import AutoTokenizer
from transformers.utils import is_torch_available
from transformers.utils.generic import ExplicitEnum
from ...processing_utils import ProcessorMixin
if is_torch_available():
import torch
class DecodeType(ExplicitEnum):
    """Supported decoding granularities for MGP-STR predictions."""
    CHARACTER = "char"
    BPE = "bpe"
    WORDPIECE = "wp"
SUPPORTED_ANNOTATION_FORMATS = (DecodeType.CHARACTER, DecodeType.BPE, DecodeType.WORDPIECE)
class MgpstrProcessor(ProcessorMixin):
    """Wraps a ViT image processor and char / BPE / wordpiece tokenizers."""
    attributes = ["image_processor", "char_tokenizer"]
    image_processor_class = "ViTImageProcessor"
    char_tokenizer_class = "MgpstrTokenizer"
    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
                ' instead.', FutureWarning, )
            feature_extractor = kwargs.pop('feature_extractor')
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError('You need to specify an `image_processor`.')
        if tokenizer is None:
            raise ValueError('You need to specify a `tokenizer`.')
        self.char_tokenizer = tokenizer
        self.bpe_tokenizer = AutoTokenizer.from_pretrained('gpt2')
        self.wp_tokenizer = AutoTokenizer.from_pretrained('bert-base-uncased')
        super().__init__(image_processor, tokenizer)
    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        if images is None and text is None:
            raise ValueError('You need to specify either an `images` or `text` input to process.')
        if images is not None:
            inputs = self.image_processor(images, return_tensors=return_tensors, **kwargs)
        if text is not None:
            encodings = self.char_tokenizer(text, return_tensors=return_tensors, **kwargs)
        if text is None:
            return inputs
        elif images is None:
            return encodings
        else:
            inputs["labels"] = encodings['input_ids']
            return inputs
    def batch_decode(self, sequences):
        """Pick, per sample, the best string among the char / BPE / wordpiece heads."""
        char_preds, bpe_preds, wp_preds = sequences
        batch_size = char_preds.size(0)
        char_strs, char_scores = self._decode_helper(char_preds, 'char')
        bpe_strs, bpe_scores = self._decode_helper(bpe_preds, 'bpe')
        wp_strs, wp_scores = self._decode_helper(wp_preds, 'wp')
        final_strs = []
        final_scores = []
        for i in range(batch_size):
            scores = [char_scores[i], bpe_scores[i], wp_scores[i]]
            strs = [char_strs[i], bpe_strs[i], wp_strs[i]]
            max_score_index = scores.index(max(scores))
            final_strs.append(strs[max_score_index])
            final_scores.append(scores[max_score_index])
        out = {}
        out["generated_text"] = final_strs
        out["scores"] = final_scores
        out["char_preds"] = char_strs
        out["bpe_preds"] = bpe_strs
        out["wp_preds"] = wp_strs
        return out
    def _decode_helper(self, pred_logits, format):
        if format == DecodeType.CHARACTER:
            decoder = self.char_decode
            eos_token = 1
            eos_str = '[s]'
        elif format == DecodeType.BPE:
            decoder = self.bpe_decode
            eos_token = 2
            eos_str = '#'
        elif format == DecodeType.WORDPIECE:
            decoder = self.wp_decode
            eos_token = 102
            eos_str = '[SEP]'
        else:
            raise ValueError(f'Format {format} is not supported.')
        dec_strs, conf_scores = [], []
        batch_size = pred_logits.size(0)
        batch_max_length = pred_logits.size(1)
        # drop the first position (the GO token) before decoding
        _, preds_index = pred_logits.topk(1, dim=-1, largest=True, sorted=True)
        preds_index = preds_index.view(-1, batch_max_length)[:, 1:]
        preds_str = decoder(preds_index)
        preds_max_prob, _ = torch.nn.functional.softmax(pred_logits, dim=2).max(dim=2)
        preds_max_prob = preds_max_prob[:, 1:]
        for index in range(batch_size):
            pred_eos = preds_str[index].find(eos_str)
            pred = preds_str[index][:pred_eos]
            pred_index = preds_index[index].cpu().tolist()
            pred_eos_index = pred_index.index(eos_token) if eos_token in pred_index else -1
            pred_max_prob = preds_max_prob[index][: pred_eos_index + 1]
            # sequence confidence = product of per-step max probabilities up to EOS
            confidence_score = pred_max_prob.cumprod(dim=0)[-1] if pred_max_prob.nelement() != 0 else 0.0
            dec_strs.append(pred)
            conf_scores.append(confidence_score)
        return dec_strs, conf_scores
    def char_decode(self, sequences):
        decode_strs = [seq.replace(' ', '') for seq in self.char_tokenizer.batch_decode(sequences)]
        return decode_strs
    def bpe_decode(self, sequences):
        return self.bpe_tokenizer.batch_decode(sequences)
    def wp_decode(self, sequences):
        decode_strs = [seq.replace(' ', '') for seq in self.wp_tokenizer.batch_decode(sequences)]
        return decode_strs
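# A minimal usage sketch (checkpoint name is the public MGP-STR release; the
# `image` variable is assumed): the model's three logits tensors feed straight
# into `batch_decode` above.
#
#   from transformers import MgpstrProcessor, MgpstrForSceneTextRecognition
#   processor = MgpstrProcessor.from_pretrained("alibaba-damo/mgp-str-base")
#   model = MgpstrForSceneTextRecognition.from_pretrained("alibaba-damo/mgp-str-base")
#   pixel_values = processor(images=image, return_tensors="pt").pixel_values
#   outputs = model(pixel_values)
#   decoded = processor.batch_decode(outputs.logits)
#   # decoded["generated_text"] holds the highest-confidence string per image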
| 62 |
'''simple docstring'''
__version__ = '''0.18.2'''
from .configuration_utils import ConfigMixin
from .utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_inflect_available,
is_invisible_watermark_available,
is_k_diffusion_available,
is_k_diffusion_version,
is_librosa_available,
is_note_seq_available,
is_onnx_available,
is_scipy_available,
is_torch_available,
is_torchsde_available,
is_transformers_available,
is_transformers_version,
is_unidecode_available,
logging,
)
try:
if not is_onnx_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_onnx_objects import * # noqa F403
else:
from .pipelines import OnnxRuntimeModel
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_pt_objects import * # noqa F403
else:
    from .models import (
        AutoencoderKL,
        ControlNetModel,
        ModelMixin,
        PriorTransformer,
        T5FilmDecoder,
        Transformer2DModel,
        UNet1DModel,
        UNet2DConditionModel,
        UNet2DModel,
        UNet3DConditionModel,
        VQModel,
    )
from .optimization import (
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
get_scheduler,
)
from .pipelines import (
AudioPipelineOutput,
ConsistencyModelPipeline,
DanceDiffusionPipeline,
DDIMPipeline,
DDPMPipeline,
DiffusionPipeline,
DiTPipeline,
ImagePipelineOutput,
KarrasVePipeline,
LDMPipeline,
LDMSuperResolutionPipeline,
PNDMPipeline,
RePaintPipeline,
ScoreSdeVePipeline,
)
from .schedulers import (
CMStochasticIterativeScheduler,
DDIMInverseScheduler,
DDIMParallelScheduler,
DDIMScheduler,
DDPMParallelScheduler,
DDPMScheduler,
DEISMultistepScheduler,
DPMSolverMultistepInverseScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
HeunDiscreteScheduler,
IPNDMScheduler,
KarrasVeScheduler,
        KDPM2AncestralDiscreteScheduler,
        KDPM2DiscreteScheduler,
PNDMScheduler,
RePaintScheduler,
SchedulerMixin,
ScoreSdeVeScheduler,
UnCLIPScheduler,
UniPCMultistepScheduler,
VQDiffusionScheduler,
)
from .training_utils import EMAModel
try:
if not (is_torch_available() and is_scipy_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_scipy_objects import * # noqa F403
else:
from .schedulers import LMSDiscreteScheduler
try:
if not (is_torch_available() and is_torchsde_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_torchsde_objects import * # noqa F403
else:
from .schedulers import DPMSolverSDEScheduler
try:
if not (is_torch_available() and is_transformers_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
    from .pipelines import (
        AltDiffusionImg2ImgPipeline,
        AltDiffusionPipeline,
        AudioLDMPipeline,
        CycleDiffusionPipeline,
        IFImg2ImgPipeline,
        IFImg2ImgSuperResolutionPipeline,
        IFInpaintingPipeline,
        IFInpaintingSuperResolutionPipeline,
        IFPipeline,
        IFSuperResolutionPipeline,
        ImageTextPipelineOutput,
        KandinskyImg2ImgPipeline,
        KandinskyInpaintPipeline,
        KandinskyPipeline,
        KandinskyPriorPipeline,
        KandinskyV22ControlnetImg2ImgPipeline,
        KandinskyV22ControlnetPipeline,
        KandinskyV22Img2ImgPipeline,
        KandinskyV22InpaintPipeline,
        KandinskyV22Pipeline,
        KandinskyV22PriorEmb2EmbPipeline,
        KandinskyV22PriorPipeline,
        LDMTextToImagePipeline,
        PaintByExamplePipeline,
        SemanticStableDiffusionPipeline,
        ShapEImg2ImgPipeline,
        ShapEPipeline,
        StableDiffusionAttendAndExcitePipeline,
        StableDiffusionControlNetImg2ImgPipeline,
        StableDiffusionControlNetInpaintPipeline,
        StableDiffusionControlNetPipeline,
        StableDiffusionDepth2ImgPipeline,
        StableDiffusionDiffEditPipeline,
        StableDiffusionImageVariationPipeline,
        StableDiffusionImg2ImgPipeline,
        StableDiffusionInpaintPipeline,
        StableDiffusionInpaintPipelineLegacy,
        StableDiffusionInstructPix2PixPipeline,
        StableDiffusionLatentUpscalePipeline,
        StableDiffusionLDM3DPipeline,
        StableDiffusionModelEditingPipeline,
        StableDiffusionPanoramaPipeline,
        StableDiffusionParadigmsPipeline,
        StableDiffusionPipeline,
        StableDiffusionPipelineSafe,
        StableDiffusionPix2PixZeroPipeline,
        StableDiffusionSAGPipeline,
        StableDiffusionUpscalePipeline,
        StableUnCLIPImg2ImgPipeline,
        StableUnCLIPPipeline,
        TextToVideoSDPipeline,
        TextToVideoZeroPipeline,
        UnCLIPImageVariationPipeline,
        UnCLIPPipeline,
        UniDiffuserModel,
        UniDiffuserPipeline,
        UniDiffuserTextDecoder,
        VersatileDiffusionDualGuidedPipeline,
        VersatileDiffusionImageVariationPipeline,
        VersatileDiffusionPipeline,
        VersatileDiffusionTextToImagePipeline,
        VideoToVideoSDPipeline,
        VQDiffusionPipeline,
    )
try:
if not (is_torch_available() and is_transformers_available() and is_invisible_watermark_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_invisible_watermark_objects import * # noqa F403
else:
    from .pipelines import StableDiffusionXLImg2ImgPipeline, StableDiffusionXLPipeline
try:
if not (is_torch_available() and is_transformers_available() and is_k_diffusion_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_k_diffusion_objects import * # noqa F403
else:
from .pipelines import StableDiffusionKDiffusionPipeline
try:
if not (is_torch_available() and is_transformers_available() and is_onnx_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_onnx_objects import * # noqa F403
else:
from .pipelines import (
        OnnxStableDiffusionImg2ImgPipeline,
OnnxStableDiffusionInpaintPipeline,
OnnxStableDiffusionInpaintPipelineLegacy,
OnnxStableDiffusionPipeline,
OnnxStableDiffusionUpscalePipeline,
StableDiffusionOnnxPipeline,
)
try:
if not (is_torch_available() and is_librosa_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_librosa_objects import * # noqa F403
else:
from .pipelines import AudioDiffusionPipeline, Mel
try:
if not (is_transformers_available() and is_torch_available() and is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_transformers_and_torch_and_note_seq_objects import * # noqa F403
else:
from .pipelines import SpectrogramDiffusionPipeline
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_flax_objects import * # noqa F403
else:
from .models.controlnet_flax import FlaxControlNetModel
from .models.modeling_flax_utils import FlaxModelMixin
    from .models.unet_2d_condition_flax import FlaxUNet2DConditionModel
from .models.vae_flax import FlaxAutoencoderKL
from .pipelines import FlaxDiffusionPipeline
from .schedulers import (
FlaxDDIMScheduler,
FlaxDDPMScheduler,
FlaxDPMSolverMultistepScheduler,
FlaxKarrasVeScheduler,
FlaxLMSDiscreteScheduler,
FlaxPNDMScheduler,
FlaxSchedulerMixin,
FlaxScoreSdeVeScheduler,
)
try:
if not (is_flax_available() and is_transformers_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_flax_and_transformers_objects import * # noqa F403
else:
from .pipelines import (
FlaxStableDiffusionControlNetPipeline,
        FlaxStableDiffusionImg2ImgPipeline,
FlaxStableDiffusionInpaintPipeline,
FlaxStableDiffusionPipeline,
)
try:
if not (is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_note_seq_objects import * # noqa F403
else:
from .pipelines import MidiProcessor
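# The try/except blocks above implement diffusers' soft-dependency pattern: when
# an optional extra is missing, dummy objects that raise a helpful ImportError
# at call time are exported instead of the real classes. A standalone sketch of
# the same idea (the names here are illustrative, not the diffusers internals):
#
#   try:
#       if not is_scipy_available():
#           raise OptionalDependencyNotAvailable()
#   except OptionalDependencyNotAvailable:
#       class LMSDiscreteScheduler:  # placeholder with a deferred error
#           def __init__(self, *args, **kwargs):
#               raise ImportError("LMSDiscreteScheduler requires `pip install scipy`.")
#   else:
#       from .schedulers import LMSDiscreteScheduler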
| 22 | 0 |
'''simple docstring'''
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DetrImageProcessor
class DetrImageProcessingTester(unittest.TestCase):
    def __init__(self, parent, batch_size=7, num_channels=3, min_resolution=30, max_resolution=4_00, do_resize=True, size=None, do_rescale=True, rescale_factor=1 / 2_55, do_normalize=True, image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5], do_pad=True, ):
        # by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
        size = size if size is not None else {"shortest_edge": 18, "longest_edge": 13_33}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_pad = do_pad
    def prepare_image_processor_dict(self):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_pad": self.do_pad,
}
    def get_expected_values(self, image_inputs, batched=False):
        """Compute the (height, width) the processor is expected to produce."""
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            # the shorter side is scaled to size["shortest_edge"], keeping aspect ratio
            if w < h:
                expected_height = int(self.size["shortest_edge"] * h / w)
                expected_width = self.size["shortest_edge"]
            elif w > h:
                expected_height = self.size["shortest_edge"]
                expected_width = int(self.size["shortest_edge"] * w / h)
            else:
                expected_height = self.size["shortest_edge"]
                expected_width = self.size["shortest_edge"]
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            # batched outputs are padded to the largest height/width in the batch
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]
        return expected_height, expected_width
@require_torch
@require_vision
class DetrImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = DetrImageProcessor if is_vision_available() else None
    def setUp(self):
        self.image_processor_tester = DetrImageProcessingTester(self)
    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_rescale"))
        self.assertTrue(hasattr(image_processing, "rescale_factor"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_pad"))
    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 18, "longest_edge": 13_33})
        self.assertEqual(image_processor.do_pad, True)
        image_processor = self.image_processing_class.from_dict(
            self.image_processor_dict, size=42, max_size=84, pad_and_return_pixel_mask=False)
        self.assertEqual(image_processor.size, {"shortest_edge": 42, "longest_edge": 84})
        self.assertEqual(image_processor.do_pad, False)
    def test_batch_feature(self):
pass
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape, (1, self.image_processor_tester.num_channels, expected_height, expected_width), )
        # Test batched
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape, (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ), )
    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape, (1, self.image_processor_tester.num_channels, expected_height, expected_width), )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape, (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ), )
    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape, (1, self.image_processor_tester.num_channels, expected_height, expected_width), )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape, (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ), )
@slow
    def test_call_pytorch_with_coco_detection_annotations(self):
        # prepare image and target
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt", "r") as f:
            target = json.loads(f.read())
        target = {"image_id": 3_97_69, "annotations": target}
        # encode them
        image_processing = DetrImageProcessor.from_pretrained("facebook/detr-resnet-50")
        encoding = image_processing(images=image, annotations=target, return_tensors="pt")
        # verify pixel values
        expected_shape = torch.Size([1, 3, 8_00, 10_66])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)
        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))
        # verify area
        expected_area = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([3_97_69])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([75, 75, 63, 65, 17, 17])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify orig_size
        expected_orig_size = torch.tensor([4_80, 6_40])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([8_00, 10_66])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))
@slow
    def test_call_pytorch_with_coco_panoptic_annotations(self):
        # prepare image, target and masks_path
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt", "r") as f:
            target = json.loads(f.read())
        target = {"file_name": "000000039769.png", "image_id": 3_97_69, "segments_info": target}
        masks_path = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic")
        # encode them
        image_processing = DetrImageProcessor.from_pretrained("facebook/detr-resnet-50-panoptic")
        encoding = image_processing(images=image, annotations=target, masks_path=masks_path, return_tensors="pt")
        # verify pixel values
        expected_shape = torch.Size([1, 3, 8_00, 10_66])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)
        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))
        # verify area
        expected_area = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([3_97_69])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([17, 17, 63, 75, 75, 93])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify masks
        expected_masks_sum = 82_28_73
        self.assertEqual(encoding["labels"][0]["masks"].sum().item(), expected_masks_sum)
        # verify orig_size
        expected_orig_size = torch.tensor([4_80, 6_40])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([8_00, 10_66])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))
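# Usage sketch mirroring what the tests above exercise (requires `transformers`;
# `pil_image` is an assumed PIL.Image input, not defined here):
#
#   processor = DetrImageProcessor.from_pretrained("facebook/detr-resnet-50")
#   enc = processor(images=pil_image, return_tensors="pt")
#   # enc.pixel_values is (1, 3, H, W): the shortest edge is scaled to
#   # size["shortest_edge"] with aspect ratio kept, which is exactly what
#   # get_expected_values() recomputes for the shape assertions.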
| 63 |
'''simple docstring'''
import re
from filelock import FileLock
try:
import nltk
__SCREAMING_SNAKE_CASE :Optional[int] = True
except (ImportError, ModuleNotFoundError):
__SCREAMING_SNAKE_CASE :str = False
if NLTK_AVAILABLE:
with FileLock('''.lock''') as lock:
nltk.download('''punkt''', quiet=True)
def add_newline_to_end_of_each_sentence(text: str) -> str:
    """Insert a newline between sentences, as ROUGE-Lsum expects."""
    text = re.sub("<n>", "", text)  # remove pegasus newline char
    assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)"
    return "\n".join(nltk.sent_tokenize(text))
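# Usage sketch (requires `nltk` with the punkt data downloaded above):
#
#   add_newline_to_end_of_each_sentence("First sentence. Second sentence.")
#   # -> "First sentence.\nSecond sentence."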
| 22 | 0 |
"""simple docstring"""
def UpperCAmelCase__ (snake_case__ : Tuple ):
"""simple docstring"""
_snake_case : Any = len(snake_case__ )
for i in range(length - 1 ):
_snake_case : Dict = i
for k in range(i + 1 , snake_case__ ):
if collection[k] < collection[least]:
_snake_case : Optional[Any] = k
if least != i:
_snake_case , _snake_case : List[str] = (collection[i], collection[least])
return collection
if __name__ == "__main__":
A_ = input('''Enter numbers separated by a comma:\n''').strip()
A_ = [int(item) for item in user_input.split(''',''')]
print(selection_sort(unsorted))
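# Quick sanity checks for the implementation above (illustrative):
#
#   selection_sort([3, 1, 2])  # -> [1, 2, 3]
#   selection_sort([])         # -> []
#
# Selection sort performs O(n^2) comparisons but at most n - 1 swaps, which is
# why the swap is guarded by `least != i`.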
| 64 |
'''simple docstring'''
import inspect
import unittest
import numpy as np
from tests.test_modeling_common import floats_tensor
from transformers import DetrConfig, MaskFormerConfig, SwinConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaskFormerForInstanceSegmentation, MaskFormerModel
if is_vision_available():
from transformers import MaskFormerImageProcessor
if is_vision_available():
from PIL import Image
class MaskFormerModelTester:
    def __init__(self, parent, batch_size=2, is_training=True, use_auxiliary_loss=False, num_queries=1_0, num_channels=3, min_size=3_2 * 4, max_size=3_2 * 6, num_labels=4, mask_feature_size=3_2, ):
        self.parent = parent
        self.batch_size = batch_size
        self.is_training = is_training
        self.use_auxiliary_loss = use_auxiliary_loss
        self.num_queries = num_queries
        self.num_channels = num_channels
        self.min_size = min_size
        self.max_size = max_size
        self.num_labels = num_labels
        self.mask_feature_size = mask_feature_size
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size]).to(
            torch_device)
        pixel_mask = torch.ones([self.batch_size, self.min_size, self.max_size], device=torch_device)
        mask_labels = (
            torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size], device=torch_device) > 0.5
        ).float()
        class_labels = (torch.rand((self.batch_size, self.num_labels), device=torch_device) > 0.5).long()
        config = self.get_config()
        return config, pixel_values, pixel_mask, mask_labels, class_labels
    def get_config(self):
return MaskFormerConfig.from_backbone_and_decoder_configs(
backbone_config=SwinConfig(
depths=[1, 1, 1, 1] , ) , decoder_config=DetrConfig(
decoder_ffn_dim=1_2_8 , num_queries=self.num_queries , decoder_attention_heads=2 , d_model=self.mask_feature_size , ) , mask_feature_size=self.mask_feature_size , fpn_feature_size=self.mask_feature_size , num_channels=self.num_channels , num_labels=self.num_labels , )
    def prepare_config_and_inputs_for_common(self):
        config, pixel_values, pixel_mask, mask_labels, class_labels = self.prepare_config_and_inputs()
        inputs_dict = {"pixel_values": pixel_values, "pixel_mask": pixel_mask}
        return config, inputs_dict
    def check_output_hidden_state(self, output, config):
        encoder_hidden_states = output.encoder_hidden_states
        pixel_decoder_hidden_states = output.pixel_decoder_hidden_states
        transformer_decoder_hidden_states = output.transformer_decoder_hidden_states
        self.parent.assertTrue(len(encoder_hidden_states), len(config.backbone_config.depths))
        self.parent.assertTrue(len(pixel_decoder_hidden_states), len(config.backbone_config.depths))
        self.parent.assertTrue(len(transformer_decoder_hidden_states), config.decoder_config.decoder_layers)
    def create_and_check_maskformer_model(self, config, pixel_values, pixel_mask, output_hidden_states=False):
        with torch.no_grad():
            model = MaskFormerModel(config=config)
            model.to(torch_device)
            model.eval()
            output = model(pixel_values=pixel_values, pixel_mask=pixel_mask)
            output = model(pixel_values, output_hidden_states=True)
        # the correct shape of output.transformer_decoder_hidden_states ensure the correcteness of the
        # encoder and pixel decoder
        self.parent.assertEqual(
            output.transformer_decoder_last_hidden_state.shape, (self.batch_size, self.num_queries, self.mask_feature_size), )
        # let's ensure the other two hidden state exists
        self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None)
        self.parent.assertTrue(output.encoder_last_hidden_state is not None)
        if output_hidden_states:
            self.check_output_hidden_state(output, config)
    def create_and_check_maskformer_instance_segmentation_head_model(self, config, pixel_values, pixel_mask, mask_labels, class_labels):
        model = MaskFormerForInstanceSegmentation(config=config)
        model.to(torch_device)
        model.eval()
        def comm_check_on_output(result):
            # let's still check that all the required stuff is there
            self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None)
            self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None)
            self.parent.assertTrue(result.encoder_last_hidden_state is not None)
            # okay, now we need to check the logits shape
            # due to the encoder compression, masks have a //4 spatial size
            self.parent.assertEqual(
                result.masks_queries_logits.shape, (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4), )
            # + 1 for null class
            self.parent.assertEqual(
                result.class_queries_logits.shape, (self.batch_size, self.num_queries, self.num_labels + 1))
        with torch.no_grad():
            result = model(pixel_values=pixel_values, pixel_mask=pixel_mask)
            result = model(pixel_values)
            comm_check_on_output(result)
        result = model(
            pixel_values=pixel_values, pixel_mask=pixel_mask, mask_labels=mask_labels, class_labels=class_labels)
        comm_check_on_output(result)
        self.parent.assertTrue(result.loss is not None)
        self.parent.assertEqual(result.loss.shape, torch.Size([1]))
@require_torch
class MaskFormerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (MaskFormerModel, MaskFormerForInstanceSegmentation) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": MaskFormerModel, "image-segmentation": MaskFormerForInstanceSegmentation}
        if is_torch_available()
        else {}
    )
    is_encoder_decoder = False
    test_pruning = False
    test_head_masking = False
    test_attention_outputs = False
    def setUp(self):
        self.model_tester = MaskFormerModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MaskFormerConfig, has_text_modality=False)
    def test_config(self):
self.config_tester.run_common_tests()
    def test_maskformer_model(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.create_and_check_maskformer_model(config, **inputs_dict, output_hidden_states=False)
    def test_maskformer_instance_segmentation_head_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_maskformer_instance_segmentation_head_model(*config_and_inputs)
@unittest.skip(reason="MaskFormer does not use inputs_embeds" )
    def test_inputs_embeds(self):
pass
@unittest.skip(reason="MaskFormer does not have a get_input_embeddings method" )
    def test_model_common_attributes(self):
pass
@unittest.skip(reason="MaskFormer is not a generative model" )
    def test_generate_without_input_ids(self):
pass
@unittest.skip(reason="MaskFormer does not use token embeddings" )
    def test_resize_tokens_embeddings(self):
pass
@require_torch_multi_gpu
@unittest.skip(
reason="MaskFormer has some layers using `add_module` which doesn't work well with `nn.DataParallel`" )
    def test_multi_gpu_data_parallel_forward(self):
pass
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
    def test_model_is_small(self):
pass
    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
@slow
    def test_model_from_pretrained(self):
        for model_name in ["facebook/maskformer-swin-small-coco"]:
            model = MaskFormerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
    def test_model_with_labels(self):
        size = (self.model_tester.min_size,) * 2
        inputs = {
            "pixel_values": torch.randn((2, 3, *size), device=torch_device),
            "mask_labels": torch.randn((2, 1_0, *size), device=torch_device),
            "class_labels": torch.zeros(2, 1_0, device=torch_device).long(),
        }
        model = MaskFormerForInstanceSegmentation(MaskFormerConfig()).to(torch_device)
        outputs = model(**inputs)
        self.assertTrue(outputs.loss is not None)
    def test_hidden_states_output(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.create_and_check_maskformer_model(config, **inputs_dict, output_hidden_states=True)
    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config).to(torch_device)
            outputs = model(**inputs_dict, output_attentions=True)
            self.assertTrue(outputs.attentions is not None)
    def test_training(self):
        if not self.model_tester.is_training:
            return
        # only MaskFormerForInstanceSegmentation has the loss
        model_class = self.all_model_classes[1]
        config, pixel_values, pixel_mask, mask_labels, class_labels = self.model_tester.prepare_config_and_inputs()
        model = model_class(config)
        model.to(torch_device)
        model.train()
        loss = model(pixel_values, mask_labels=mask_labels, class_labels=class_labels).loss
        loss.backward()
    def test_retain_grad_hidden_states_attentions(self):
        # only MaskFormerForInstanceSegmentation has the loss
        model_class = self.all_model_classes[1]
        config, pixel_values, pixel_mask, mask_labels, class_labels = self.model_tester.prepare_config_and_inputs()
        config.output_hidden_states = True
        config.output_attentions = True
        model = model_class(config)
        model.to(torch_device)
        model.train()
        outputs = model(pixel_values, mask_labels=mask_labels, class_labels=class_labels)
        encoder_hidden_states = outputs.encoder_hidden_states[0]
        encoder_hidden_states.retain_grad()
        pixel_decoder_hidden_states = outputs.pixel_decoder_hidden_states[0]
        pixel_decoder_hidden_states.retain_grad()
        # we requires_grad=True in inputs_embeds (line 2152), the original implementation don't
        transformer_decoder_hidden_states = outputs.transformer_decoder_hidden_states[0]
        transformer_decoder_hidden_states.retain_grad()
        attentions = outputs.attentions[0]
        attentions.retain_grad()
        outputs.loss.backward(retain_graph=True)
        self.assertIsNotNone(encoder_hidden_states.grad)
        self.assertIsNotNone(pixel_decoder_hidden_states.grad)
        self.assertIsNotNone(transformer_decoder_hidden_states.grad)
        self.assertIsNotNone(attentions.grad)
TOLERANCE = 1e-4
def prepare_img():
    """Load the standard COCO test fixture image."""
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_vision
@slow
class MaskFormerModelIntegrationTest(unittest.TestCase):
@cached_property
    def default_image_processor(self):
return (
MaskFormerImageProcessor.from_pretrained("facebook/maskformer-swin-small-coco" )
if is_vision_available()
else None
)
    def test_inference_no_head(self):
        model = MaskFormerModel.from_pretrained("facebook/maskformer-swin-small-coco").to(torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(image, return_tensors="pt").to(torch_device)
        inputs_shape = inputs["pixel_values"].shape
        # check size is divisible by 32
        self.assertTrue((inputs_shape[-1] % 3_2) == 0 and (inputs_shape[-2] % 3_2) == 0)
        # check size
        self.assertEqual(inputs_shape, (1, 3, 8_0_0, 1_0_8_8))
        with torch.no_grad():
            outputs = model(**inputs)
        expected_slice_hidden_state = torch.tensor(
            [[-0.0_4_8_2, 0.9_2_2_8, 0.4_9_5_1], [-0.2_5_4_7, 0.8_0_1_7, 0.8_5_2_7], [-0.0_0_6_9, 0.3_3_8_5, -0.0_0_8_9]]).to(torch_device)
        self.assertTrue(
            torch.allclose(
                outputs.encoder_last_hidden_state[0, 0, :3, :3], expected_slice_hidden_state, atol=TOLERANCE))
        expected_slice_hidden_state = torch.tensor(
            [[-0.8_4_2_2, -0.8_4_3_4, -0.9_7_1_8], [-1.0_1_4_4, -0.5_5_6_5, -0.4_1_9_5], [-1.0_0_3_8, -0.4_4_8_4, -0.1_9_6_1]]).to(torch_device)
        self.assertTrue(
            torch.allclose(
                outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3], expected_slice_hidden_state, atol=TOLERANCE))
        expected_slice_hidden_state = torch.tensor(
            [[0.2_8_5_2, -0.0_1_5_9, 0.9_7_3_5], [0.6_2_5_4, 0.1_8_5_8, 0.8_5_2_9], [-0.0_6_8_0, -0.4_1_1_6, 1.8_4_1_3]]).to(torch_device)
        self.assertTrue(
            torch.allclose(
                outputs.transformer_decoder_last_hidden_state[0, :3, :3], expected_slice_hidden_state, atol=TOLERANCE))
    def test_inference_instance_segmentation_head(self):
        model = (
            MaskFormerForInstanceSegmentation.from_pretrained("facebook/maskformer-swin-small-coco")
            .to(torch_device)
            .eval()
        )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(image, return_tensors="pt").to(torch_device)
        inputs_shape = inputs["pixel_values"].shape
        # check size is divisible by 32
        self.assertTrue((inputs_shape[-1] % 3_2) == 0 and (inputs_shape[-2] % 3_2) == 0)
        # check size
        self.assertEqual(inputs_shape, (1, 3, 8_0_0, 1_0_8_8))
        with torch.no_grad():
            outputs = model(**inputs)
        # masks_queries_logits
        masks_queries_logits = outputs.masks_queries_logits
        self.assertEqual(
            masks_queries_logits.shape, (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4), )
        expected_slice = [
            [-1.3_7_3_7_1_2_4, -1.7_7_2_4_9_3_7, -1.9_3_6_4_2_3_3],
            [-1.5_9_7_7_2_8_1, -1.9_8_6_7_9_3_9, -2.1_5_2_3_6_9_5],
            [-1.5_7_9_5_3_9_8, -1.9_2_6_9_8_3_2, -2.0_9_3_9_4_2],
        ]
        expected_slice = torch.tensor(expected_slice).to(torch_device)
        self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3], expected_slice, atol=TOLERANCE))
        # class_queries_logits
        class_queries_logits = outputs.class_queries_logits
        self.assertEqual(
            class_queries_logits.shape, (1, model.config.decoder_config.num_queries, model.config.num_labels + 1))
        expected_slice = torch.tensor(
            [
                [1.6_512e00, -5.2_572e00, -3.3_519e00],
                [3.6_169e-02, -5.9_025e00, -2.9_313e00],
                [1.0_766e-04, -7.7_630e00, -5.1_263e00],
            ]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3], expected_slice, atol=TOLERANCE))
    def test_inference_instance_segmentation_head_resnet_backbone(self):
        model = (
            MaskFormerForInstanceSegmentation.from_pretrained("facebook/maskformer-resnet101-coco-stuff")
            .to(torch_device)
            .eval()
        )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(image, return_tensors="pt").to(torch_device)
        inputs_shape = inputs["pixel_values"].shape
        # check size is divisible by 32
        self.assertTrue((inputs_shape[-1] % 3_2) == 0 and (inputs_shape[-2] % 3_2) == 0)
        # check size
        self.assertEqual(inputs_shape, (1, 3, 8_0_0, 1_0_8_8))
        with torch.no_grad():
            outputs = model(**inputs)
        # masks_queries_logits
        masks_queries_logits = outputs.masks_queries_logits
        self.assertEqual(
            masks_queries_logits.shape, (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4), )
        expected_slice = [[-0.9_0_4_6, -2.6_3_6_6, -4.6_0_6_2], [-3.4_1_7_9, -5.7_8_9_0, -8.8_0_5_7], [-4.9_1_7_9, -7.6_5_6_0, -1_0.7_7_1_1]]
        expected_slice = torch.tensor(expected_slice).to(torch_device)
        self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3], expected_slice, atol=TOLERANCE))
        # class_queries_logits
        class_queries_logits = outputs.class_queries_logits
        self.assertEqual(
            class_queries_logits.shape, (1, model.config.decoder_config.num_queries, model.config.num_labels + 1))
        expected_slice = torch.tensor(
            [[4.7_1_8_8, -3.2_5_8_5, -2.8_8_5_7], [6.6_8_7_1, -2.9_1_8_1, -1.2_4_8_7], [7.2_4_4_9, -2.2_7_6_4, -2.1_8_7_4]]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3], expected_slice, atol=TOLERANCE))
    def test_with_segmentation_maps_and_loss(self):
        model = (
            MaskFormerForInstanceSegmentation.from_pretrained("facebook/maskformer-swin-small-coco")
            .to(torch_device)
            .eval()
        )
        image_processor = self.default_image_processor
        inputs = image_processor(
            [np.zeros((3, 8_0_0, 1_3_3_3)), np.zeros((3, 8_0_0, 1_3_3_3))], segmentation_maps=[np.zeros((3_8_4, 3_8_4)).astype(np.float32), np.zeros((3_8_4, 3_8_4)).astype(np.float32)], return_tensors="pt", )
        pixel_values = inputs["pixel_values"].to(torch_device)
        mask_labels = [el.to(torch_device) for el in inputs["mask_labels"]]
        class_labels = [el.to(torch_device) for el in inputs["class_labels"]]
        with torch.no_grad():
            outputs = model(pixel_values=pixel_values, mask_labels=mask_labels, class_labels=class_labels)
        self.assertTrue(outputs.loss is not None)
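# A minimal inference sketch mirroring the integration tests above (checkpoint
# name taken from the tests; the `image` input is assumed):
#
#   processor = MaskFormerImageProcessor.from_pretrained("facebook/maskformer-swin-small-coco")
#   model = MaskFormerForInstanceSegmentation.from_pretrained("facebook/maskformer-swin-small-coco").eval()
#   inputs = processor(images=image, return_tensors="pt")
#   with torch.no_grad():
#       outputs = model(**inputs)
#   # per-pixel class map, combining mask and class queries
#   semantic_map = processor.post_process_semantic_segmentation(outputs)[0]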
| 22 | 0 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_bert import BertTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'bert-base-uncased': 'https://huggingface.co/bert-base-uncased/resolve/main/vocab.txt',
'bert-large-uncased': 'https://huggingface.co/bert-large-uncased/resolve/main/vocab.txt',
'bert-base-cased': 'https://huggingface.co/bert-base-cased/resolve/main/vocab.txt',
'bert-large-cased': 'https://huggingface.co/bert-large-cased/resolve/main/vocab.txt',
'bert-base-multilingual-uncased': (
'https://huggingface.co/bert-base-multilingual-uncased/resolve/main/vocab.txt'
),
'bert-base-multilingual-cased': 'https://huggingface.co/bert-base-multilingual-cased/resolve/main/vocab.txt',
'bert-base-chinese': 'https://huggingface.co/bert-base-chinese/resolve/main/vocab.txt',
'bert-base-german-cased': 'https://huggingface.co/bert-base-german-cased/resolve/main/vocab.txt',
'bert-large-uncased-whole-word-masking': (
'https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/vocab.txt'
),
'bert-large-cased-whole-word-masking': (
'https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/vocab.txt'
),
'bert-large-uncased-whole-word-masking-finetuned-squad': (
'https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt'
),
'bert-large-cased-whole-word-masking-finetuned-squad': (
'https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt'
),
'bert-base-cased-finetuned-mrpc': (
'https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/vocab.txt'
),
'bert-base-german-dbmdz-cased': 'https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/vocab.txt',
'bert-base-german-dbmdz-uncased': (
'https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/vocab.txt'
),
'TurkuNLP/bert-base-finnish-cased-v1': (
'https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/vocab.txt'
),
'TurkuNLP/bert-base-finnish-uncased-v1': (
'https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/vocab.txt'
),
'wietsedv/bert-base-dutch-cased': (
'https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'bert-base-uncased': 'https://huggingface.co/bert-base-uncased/resolve/main/tokenizer.json',
'bert-large-uncased': 'https://huggingface.co/bert-large-uncased/resolve/main/tokenizer.json',
'bert-base-cased': 'https://huggingface.co/bert-base-cased/resolve/main/tokenizer.json',
'bert-large-cased': 'https://huggingface.co/bert-large-cased/resolve/main/tokenizer.json',
'bert-base-multilingual-uncased': (
'https://huggingface.co/bert-base-multilingual-uncased/resolve/main/tokenizer.json'
),
'bert-base-multilingual-cased': (
'https://huggingface.co/bert-base-multilingual-cased/resolve/main/tokenizer.json'
),
'bert-base-chinese': 'https://huggingface.co/bert-base-chinese/resolve/main/tokenizer.json',
'bert-base-german-cased': 'https://huggingface.co/bert-base-german-cased/resolve/main/tokenizer.json',
'bert-large-uncased-whole-word-masking': (
'https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/tokenizer.json'
),
'bert-large-cased-whole-word-masking': (
'https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/tokenizer.json'
),
'bert-large-uncased-whole-word-masking-finetuned-squad': (
'https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json'
),
'bert-large-cased-whole-word-masking-finetuned-squad': (
'https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json'
),
'bert-base-cased-finetuned-mrpc': (
'https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/tokenizer.json'
),
'bert-base-german-dbmdz-cased': (
'https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/tokenizer.json'
),
'bert-base-german-dbmdz-uncased': (
'https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/tokenizer.json'
),
'TurkuNLP/bert-base-finnish-cased-v1': (
'https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/tokenizer.json'
),
'TurkuNLP/bert-base-finnish-uncased-v1': (
'https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/tokenizer.json'
),
'wietsedv/bert-base-dutch-cased': (
'https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/tokenizer.json'
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'bert-base-uncased': 5_1_2,
'bert-large-uncased': 5_1_2,
'bert-base-cased': 5_1_2,
'bert-large-cased': 5_1_2,
'bert-base-multilingual-uncased': 5_1_2,
'bert-base-multilingual-cased': 5_1_2,
'bert-base-chinese': 5_1_2,
'bert-base-german-cased': 5_1_2,
'bert-large-uncased-whole-word-masking': 5_1_2,
'bert-large-cased-whole-word-masking': 5_1_2,
'bert-large-uncased-whole-word-masking-finetuned-squad': 5_1_2,
'bert-large-cased-whole-word-masking-finetuned-squad': 5_1_2,
'bert-base-cased-finetuned-mrpc': 5_1_2,
'bert-base-german-dbmdz-cased': 5_1_2,
'bert-base-german-dbmdz-uncased': 5_1_2,
'TurkuNLP/bert-base-finnish-cased-v1': 5_1_2,
'TurkuNLP/bert-base-finnish-uncased-v1': 5_1_2,
'wietsedv/bert-base-dutch-cased': 5_1_2,
}
PRETRAINED_INIT_CONFIGURATION = {
'bert-base-uncased': {'do_lower_case': True},
'bert-large-uncased': {'do_lower_case': True},
'bert-base-cased': {'do_lower_case': False},
'bert-large-cased': {'do_lower_case': False},
'bert-base-multilingual-uncased': {'do_lower_case': True},
'bert-base-multilingual-cased': {'do_lower_case': False},
'bert-base-chinese': {'do_lower_case': False},
'bert-base-german-cased': {'do_lower_case': False},
'bert-large-uncased-whole-word-masking': {'do_lower_case': True},
'bert-large-cased-whole-word-masking': {'do_lower_case': False},
'bert-large-uncased-whole-word-masking-finetuned-squad': {'do_lower_case': True},
'bert-large-cased-whole-word-masking-finetuned-squad': {'do_lower_case': False},
'bert-base-cased-finetuned-mrpc': {'do_lower_case': False},
'bert-base-german-dbmdz-cased': {'do_lower_case': False},
'bert-base-german-dbmdz-uncased': {'do_lower_case': True},
'TurkuNLP/bert-base-finnish-cased-v1': {'do_lower_case': False},
'TurkuNLP/bert-base-finnish-uncased-v1': {'do_lower_case': True},
'wietsedv/bert-base-dutch-cased': {'do_lower_case': False},
}
class BertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = BertTokenizer
    def __init__(self, vocab_file=None, tokenizer_file=None, do_lower_case=True, unk_token="[UNK]", sep_token="[SEP]", pad_token="[PAD]", cls_token="[CLS]", mask_token="[MASK]", tokenize_chinese_chars=True, strip_accents=None, **kwargs, ):
        super().__init__(
            vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, tokenize_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents, **kwargs, )
        # rebuild the backend normalizer if its saved state disagrees with the arguments
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)
        self.do_lower_case = do_lower_case
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """Add [CLS]/[SEP] around one sequence, or a sequence pair."""
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output
    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        """0s over the first segment (with [CLS]/[SEP]), 1s over the second."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
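# Usage sketch (requires `transformers`): the special-token layout produced by
# the two helpers above for a sentence pair.
#
#   tok = BertTokenizerFast.from_pretrained("bert-base-uncased")
#   enc = tok("first segment", "second segment")
#   # input_ids      : [CLS] first segment [SEP] second segment [SEP]
#   # token_type_ids : 0s over "[CLS] ... [SEP]", 1s over "second segment [SEP]"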
| 65 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_albert import AlbertTokenizer
else:
__SCREAMING_SNAKE_CASE :List[Any] = None
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'''vocab_file''': '''spiece.model''', '''tokenizer_file''': '''tokenizer.json'''}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''albert-base-v1''': '''https://huggingface.co/albert-base-v1/resolve/main/spiece.model''',
'''albert-large-v1''': '''https://huggingface.co/albert-large-v1/resolve/main/spiece.model''',
'''albert-xlarge-v1''': '''https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model''',
'''albert-xxlarge-v1''': '''https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model''',
'''albert-base-v2''': '''https://huggingface.co/albert-base-v2/resolve/main/spiece.model''',
'''albert-large-v2''': '''https://huggingface.co/albert-large-v2/resolve/main/spiece.model''',
'''albert-xlarge-v2''': '''https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model''',
'''albert-xxlarge-v2''': '''https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model''',
},
'''tokenizer_file''': {
'''albert-base-v1''': '''https://huggingface.co/albert-base-v1/resolve/main/tokenizer.json''',
'''albert-large-v1''': '''https://huggingface.co/albert-large-v1/resolve/main/tokenizer.json''',
'''albert-xlarge-v1''': '''https://huggingface.co/albert-xlarge-v1/resolve/main/tokenizer.json''',
'''albert-xxlarge-v1''': '''https://huggingface.co/albert-xxlarge-v1/resolve/main/tokenizer.json''',
'''albert-base-v2''': '''https://huggingface.co/albert-base-v2/resolve/main/tokenizer.json''',
'''albert-large-v2''': '''https://huggingface.co/albert-large-v2/resolve/main/tokenizer.json''',
'''albert-xlarge-v2''': '''https://huggingface.co/albert-xlarge-v2/resolve/main/tokenizer.json''',
'''albert-xxlarge-v2''': '''https://huggingface.co/albert-xxlarge-v2/resolve/main/tokenizer.json''',
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''albert-base-v1''': 512,
'''albert-large-v1''': 512,
'''albert-xlarge-v1''': 512,
'''albert-xxlarge-v1''': 512,
'''albert-base-v2''': 512,
'''albert-large-v2''': 512,
'''albert-xlarge-v2''': 512,
'''albert-xxlarge-v2''': 512,
}
SPIECE_UNDERLINE = '''▁'''
class AlbertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = AlbertTokenizer
    def __init__(self, vocab_file=None, tokenizer_file=None, do_lower_case=True, remove_space=True, keep_accents=False, bos_token="[CLS]", eos_token="[SEP]", unk_token="<unk>", sep_token="[SEP]", pad_token="<pad>", cls_token="[CLS]", mask_token="[MASK]", **kwargs, ):
        # Mask token behave like a normal word, i.e. include the space before it and
        # is included in the raw text, there should be a match in a non-normalized sentence.
        mask_token = (
            AddedToken(mask_token, lstrip=True, rstrip=False, normalized=False)
            if isinstance(mask_token, str)
            else mask_token
        )
        super().__init__(
            vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, remove_space=remove_space, keep_accents=keep_accents, bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, **kwargs, )
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep
    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None):
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer.")
        if not os.path.isdir(save_directory):
            logger.error(f'Vocabulary path ({save_directory}) should be a directory')
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
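# Usage sketch (requires `transformers` and `sentencepiece`):
#
#   tok = AlbertTokenizerFast.from_pretrained("albert-base-v2")
#   tok.save_pretrained("./albert-tok")  # works because spiece.model is present
#   # save_vocabulary() above raises instead if the tokenizer was built
#   # without a `vocab_file` (can_save_slow_tokenizer is False in that case).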
| 22 | 0 |
"""simple docstring"""
import math
import tensorflow as tf
from packaging import version
def _gelu(x):
    """Exact GELU, computed with the Gaussian error function."""
    x = tf.convert_to_tensor(x)
    cdf = 0.5 * (1.0 + tf.math.erf(x / tf.cast(tf.sqrt(2.0), x.dtype)))
    return x * cdf


def _gelu_new(x):
    """Tanh approximation of GELU (as used in the original GPT-2 codebase)."""
    x = tf.convert_to_tensor(x)
    pi = tf.cast(math.pi, x.dtype)
    coeff = tf.cast(0.044715, x.dtype)
    cdf = 0.5 * (1.0 + tf.tanh(tf.sqrt(2.0 / pi) * (x + coeff * tf.pow(x, 3))))
    return x * cdf


def mish(x):
    x = tf.convert_to_tensor(x)
    return x * tf.tanh(tf.math.softplus(x))


def gelu_fast(x):
    x = tf.convert_to_tensor(x)
    coeff1 = tf.cast(0.044715, x.dtype)
    coeff2 = tf.cast(0.7978845608, x.dtype)
    return 0.5 * x * (1.0 + tf.tanh(x * coeff2 * (1.0 + coeff1 * x * x)))


def quick_gelu(x):
    x = tf.convert_to_tensor(x)
    coeff = tf.cast(1.702, x.dtype)
    return x * tf.math.sigmoid(coeff * x)


def gelu_10(x):
    """GELU clipped to the range [-10, 10]."""
    return tf.clip_by_value(_gelu(x), -10, 10)


def glu(x, axis=-1):
    """Gated Linear Unit: splits the input in two halves and gates one with the other."""
    a, b = tf.split(x, 2, axis=axis)
    return a * tf.math.sigmoid(b)


if version.parse(tf.version.VERSION) >= version.parse("2.4"):

    def approximate_gelu_wrap(x):
        return tf.keras.activations.gelu(x, approximate=True)

    gelu = tf.keras.activations.gelu
    gelu_new = approximate_gelu_wrap
else:
    gelu = _gelu
    gelu_new = _gelu_new


ACT2FN = {
    "gelu": gelu,
    "gelu_10": gelu_10,
    "gelu_fast": gelu_fast,
    "gelu_new": gelu_new,
    "glu": glu,
    "mish": mish,
    "quick_gelu": quick_gelu,
    "relu": tf.keras.activations.relu,
    "sigmoid": tf.keras.activations.sigmoid,
    "silu": tf.keras.activations.swish,
    "swish": tf.keras.activations.swish,
    "tanh": tf.keras.activations.tanh,
}


def get_tf_activation(activation_string):
    if activation_string in ACT2FN:
        return ACT2FN[activation_string]
    else:
        raise KeyError(f"function {activation_string} not found in ACT2FN mapping {list(ACT2FN.keys())}")
| 66 |
'''simple docstring'''
from collections import OrderedDict
from typing import Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...feature_extraction_utils import FeatureExtractionMixin
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import TensorType, logging
__SCREAMING_SNAKE_CASE :Dict = logging.get_logger(__name__)
__SCREAMING_SNAKE_CASE :int = {
'''deepmind/language-perceiver''': '''https://huggingface.co/deepmind/language-perceiver/resolve/main/config.json''',
# See all Perceiver models at https://huggingface.co/models?filter=perceiver
}
class PerceiverConfig(PretrainedConfig):
    model_type = "perceiver"

    def __init__(
        self,
        num_latents=256,
        d_latents=1280,
        d_model=768,
        num_blocks=1,
        num_self_attends_per_block=26,
        num_self_attention_heads=8,
        num_cross_attention_heads=8,
        qk_channels=None,
        v_channels=None,
        cross_attention_shape_for_attention="kv",
        self_attention_widening_factor=1,
        cross_attention_widening_factor=1,
        hidden_act="gelu",
        attention_probs_dropout_prob=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        use_query_residual=True,
        vocab_size=262,
        max_position_embeddings=2048,
        image_size=56,
        train_size=[368, 496],
        num_frames=16,
        audio_samples_per_frame=1920,
        samples_per_patch=16,
        output_shape=[1, 16, 224, 224],
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.num_latents = num_latents
        self.d_latents = d_latents
        self.d_model = d_model
        self.num_blocks = num_blocks
        self.num_self_attends_per_block = num_self_attends_per_block
        self.num_self_attention_heads = num_self_attention_heads
        self.num_cross_attention_heads = num_cross_attention_heads
        self.qk_channels = qk_channels
        self.v_channels = v_channels
        self.cross_attention_shape_for_attention = cross_attention_shape_for_attention
        self.self_attention_widening_factor = self_attention_widening_factor
        self.cross_attention_widening_factor = cross_attention_widening_factor
        self.hidden_act = hidden_act
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_query_residual = use_query_residual
        # masked language modeling attributes
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        # image classification attributes
        self.image_size = image_size
        # flow attributes
        self.train_size = train_size
        # multimodal autoencoding attributes
        self.num_frames = num_frames
        self.audio_samples_per_frame = audio_samples_per_frame
        self.samples_per_patch = samples_per_patch
        self.output_shape = output_shape
class PerceiverOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("inputs", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4

    def generate_dummy_inputs(
        self,
        preprocessor: Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"],
        batch_size: int = -1,
        seq_length: int = -1,
        num_choices: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
        num_channels: int = 3,
        image_width: int = 40,
        image_height: int = 40,
    ) -> Mapping[str, Any]:
        # copied from `transformers.onnx.config.OnnxConfig` and slightly altered/simplified
        if isinstance(preprocessor, PreTrainedTokenizerBase):
            # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
            batch_size = compute_effective_axis_dimension(
                batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0
            )
            # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
            token_to_add = preprocessor.num_special_tokens_to_add(is_pair)
            seq_length = compute_effective_axis_dimension(
                seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add
            )
            # Generate dummy inputs according to compute batch and sequence
            dummy_input = [" ".join(["a"]) * seq_length] * batch_size
            inputs = dict(preprocessor(dummy_input, return_tensors=framework))
            inputs["inputs"] = inputs.pop("input_ids")
            return inputs
        elif isinstance(preprocessor, FeatureExtractionMixin) and preprocessor.model_input_names[0] == "pixel_values":
            # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
            batch_size = compute_effective_axis_dimension(batch_size, fixed_dimension=OnnxConfig.default_fixed_batch)
            dummy_input = self._generate_dummy_images(batch_size, num_channels, image_height, image_width)
            inputs = dict(preprocessor(images=dummy_input, return_tensors=framework))
            inputs["inputs"] = inputs.pop("pixel_values")
            return inputs
        else:
            raise ValueError(
                "Unable to generate dummy inputs for the model. Please provide a tokenizer or a preprocessor."
            )
| 22 | 0 |
'''simple docstring'''
def add(first: int, second: int) -> int:
    """Add two integers using only bitwise operations."""
    while second != 0:
        carry = first & second  # bits that overflow into the next position
        first ^= second  # sum of the bits without the carries
        second = carry << 1  # propagate the carries one position to the left
    return first
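

# Worked example of the carry loop above, values shown in binary:
#   add(5, 3): first=101, second=011 -> carry=001, first=110, second=010
#              first=110, second=010 -> carry=010, first=100, second=100
#              first=100, second=100 -> carry=100, first=000, second=1000
#              first=000, second=1000 -> carry=0,  first=1000 -> returns 8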
if __name__ == "__main__":
import doctest
doctest.testmod()
__UpperCAmelCase =int(input("Enter the first number: ").strip())
__UpperCAmelCase =int(input("Enter the second number: ").strip())
print(f'{add(first, second) = }')
| 67 |
'''simple docstring'''
import warnings
from ...utils import is_sklearn_available, requires_backends
if is_sklearn_available():
from scipy.stats import pearsonr, spearmanr
    from sklearn.metrics import f1_score, matthews_corrcoef


DEPRECATION_WARNING = (
    "This metric will be removed from the library soon, metrics should be handled with the 🤗 Evaluate "
    "library. You can have a look at this example script for pointers: "
    "https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py"
)


def simple_accuracy(preds, labels):
    warnings.warn(DEPRECATION_WARNING, FutureWarning)
    requires_backends(simple_accuracy, "sklearn")
    return (preds == labels).mean()


def acc_and_f1(preds, labels):
    warnings.warn(DEPRECATION_WARNING, FutureWarning)
    requires_backends(acc_and_f1, "sklearn")
    acc = simple_accuracy(preds, labels)
    f1 = f1_score(y_true=labels, y_pred=preds)
    return {
        "acc": acc,
        "f1": f1,
        "acc_and_f1": (acc + f1) / 2,
    }


def pearson_and_spearman(preds, labels):
    warnings.warn(DEPRECATION_WARNING, FutureWarning)
    requires_backends(pearson_and_spearman, "sklearn")
    pearson_corr = pearsonr(preds, labels)[0]
    spearman_corr = spearmanr(preds, labels)[0]
    return {
        "pearson": pearson_corr,
        "spearmanr": spearman_corr,
        "corr": (pearson_corr + spearman_corr) / 2,
    }


def glue_compute_metrics(task_name, preds, labels):
    warnings.warn(DEPRECATION_WARNING, FutureWarning)
    requires_backends(glue_compute_metrics, "sklearn")
    assert len(preds) == len(labels), f"Predictions and labels have mismatched lengths {len(preds)} and {len(labels)}"
    if task_name == "cola":
        return {"mcc": matthews_corrcoef(labels, preds)}
    elif task_name == "sst-2":
        return {"acc": simple_accuracy(preds, labels)}
    elif task_name == "mrpc":
        return acc_and_f1(preds, labels)
    elif task_name == "sts-b":
        return pearson_and_spearman(preds, labels)
    elif task_name == "qqp":
        return acc_and_f1(preds, labels)
    elif task_name == "mnli":
        return {"mnli/acc": simple_accuracy(preds, labels)}
    elif task_name == "mnli-mm":
        return {"mnli-mm/acc": simple_accuracy(preds, labels)}
    elif task_name == "qnli":
        return {"acc": simple_accuracy(preds, labels)}
    elif task_name == "rte":
        return {"acc": simple_accuracy(preds, labels)}
    elif task_name == "wnli":
        return {"acc": simple_accuracy(preds, labels)}
    elif task_name == "hans":
        return {"acc": simple_accuracy(preds, labels)}
    else:
        raise KeyError(task_name)


def xnli_compute_metrics(task_name, preds, labels):
    warnings.warn(DEPRECATION_WARNING, FutureWarning)
    requires_backends(xnli_compute_metrics, "sklearn")
    if len(preds) != len(labels):
        raise ValueError(f"Predictions and labels have mismatched lengths {len(preds)} and {len(labels)}")
    if task_name == "xnli":
        return {"acc": simple_accuracy(preds, labels)}
    else:
        raise KeyError(task_name)
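

# A quick sanity check of the restored metrics above (a sketch; requires numpy
# and scikit-learn):
if __name__ == "__main__":
    import numpy as np

    preds = np.array([1, 0, 1, 1])
    labels = np.array([1, 0, 0, 1])
    print(glue_compute_metrics("mrpc", preds, labels))  # acc 0.75, f1 0.8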
| 22 | 0 |
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class CLIPSegProcessor(ProcessorMixin):
    """Wraps a ViT image processor and a CLIP tokenizer into a single processor."""

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "ViTImageProcessor"
    tokenizer_class = ("CLIPTokenizer", "CLIPTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)

    def __call__(self, text=None, images=None, visual_prompt=None, return_tensors=None, **kwargs):
        if text is None and visual_prompt is None and images is None:
            raise ValueError("You have to specify either text, visual prompt or images.")

        if text is not None and visual_prompt is not None:
            raise ValueError("You have to specify exactly one type of prompt. Either text or visual prompt.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if visual_prompt is not None:
            prompt_features = self.image_processor(visual_prompt, return_tensors=return_tensors, **kwargs)

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if visual_prompt is not None and images is not None:
            encoding = {
                "pixel_values": image_features.pixel_values,
                "conditional_pixel_values": prompt_features.pixel_values,
            }
            return encoding
        elif text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        elif visual_prompt is not None:
            encoding = {
                "conditional_pixel_values": prompt_features.pixel_values,
            }
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        """Forwards all arguments to the tokenizer's batch_decode."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forwards all arguments to the tokenizer's decode."""
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
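

# A hedged usage sketch ("CIDAS/clipseg-rd64-refined" is the public CLIPSeg
# checkpoint on the Hub; requires transformers and PIL, and downloads weights):
#
#     from transformers import AutoProcessor
#     from PIL import Image
#     processor = AutoProcessor.from_pretrained("CIDAS/clipseg-rd64-refined")
#     inputs = processor(text=["a cat"], images=Image.open("cat.png"), return_tensors="pt")
#     # -> dict with input_ids, attention_mask and pixel_values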
| 68 |
'''simple docstring'''
import argparse
from transformers import (
TapasConfig,
TapasForMaskedLM,
TapasForQuestionAnswering,
TapasForSequenceClassification,
TapasModel,
TapasTokenizer,
load_tf_weights_in_tapas,
)
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(
    task, reset_position_index_per_cell, tf_checkpoint_path, tapas_config_file, pytorch_dump_path
):
    config = TapasConfig.from_json_file(tapas_config_file)
    # set absolute/relative position embeddings parameter
    config.reset_position_index_per_cell = reset_position_index_per_cell

    # set remaining parameters of TapasConfig as well as the model based on the task
    if task == "SQA":
        model = TapasForQuestionAnswering(config=config)
    elif task == "WTQ":
        # run_task_main.py hparams
        config.num_aggregation_labels = 4
        config.use_answer_as_supervision = True
        # hparam_utils.py hparams
        config.answer_loss_cutoff = 0.664694
        config.cell_selection_preference = 0.207951
        config.huber_loss_delta = 0.121194
        config.init_cell_selection_weights_to_zero = True
        config.select_one_column = True
        config.allow_empty_column_selection = False
        config.temperature = 0.0352513
        model = TapasForQuestionAnswering(config=config)
    elif task == "WIKISQL_SUPERVISED":
        # run_task_main.py hparams
        config.num_aggregation_labels = 4
        config.use_answer_as_supervision = False
        # hparam_utils.py hparams
        config.answer_loss_cutoff = 36.4519
        config.cell_selection_preference = 0.903421
        config.huber_loss_delta = 222.088
        config.init_cell_selection_weights_to_zero = True
        config.select_one_column = True
        config.allow_empty_column_selection = True
        config.temperature = 0.763141
        model = TapasForQuestionAnswering(config=config)
    elif task == "TABFACT":
        model = TapasForSequenceClassification(config=config)
    elif task == "MLM":
        model = TapasForMaskedLM(config=config)
    elif task == "INTERMEDIATE_PRETRAINING":
        model = TapasModel(config=config)
    else:
        raise ValueError(f"Task {task} not supported.")

    print(f"Building PyTorch model from configuration: {config}")
    # Load weights from tf checkpoint
    load_tf_weights_in_tapas(model, config, tf_checkpoint_path)

    # Save pytorch-model (weights and configuration)
    print(f"Save PyTorch model to {pytorch_dump_path}")
    model.save_pretrained(pytorch_dump_path)

    # Save tokenizer files
    print(f"Save tokenizer files to {pytorch_dump_path}")
    tokenizer = TapasTokenizer(vocab_file=tf_checkpoint_path[:-10] + "vocab.txt", model_max_length=512)
    tokenizer.save_pretrained(pytorch_dump_path)

    print("Used relative position embeddings:", model.config.reset_position_index_per_cell)
if __name__ == "__main__":
__SCREAMING_SNAKE_CASE :List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--task''', default='''SQA''', type=str, help='''Model task for which to convert a checkpoint. Defaults to SQA.'''
)
parser.add_argument(
'''--reset_position_index_per_cell''',
default=False,
action='''store_true''',
help='''Whether to use relative position embeddings or not. Defaults to True.''',
)
parser.add_argument(
'''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--tapas_config_file''',
default=None,
type=str,
required=True,
help=(
'''The config json file corresponding to the pre-trained TAPAS model. \n'''
'''This specifies the model architecture.'''
),
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
__SCREAMING_SNAKE_CASE :List[str] = parser.parse_args()
convert_tf_checkpoint_to_pytorch(
args.task,
args.reset_position_index_per_cell,
args.tf_checkpoint_path,
args.tapas_config_file,
args.pytorch_dump_path,
)
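
# Example invocation (a sketch; the script filename and all paths are placeholders):
#
#   python convert_tapas_checkpoint.py \
#       --task WTQ \
#       --reset_position_index_per_cell \
#       --tf_checkpoint_path /path/to/model.ckpt \
#       --tapas_config_file /path/to/tapas_config.json \
#       --pytorch_dump_path /path/to/output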
| 22 | 0 |
"""simple docstring"""
import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
class UpperCamelCase ( lowerCAmelCase__ ):
SCREAMING_SNAKE_CASE_ = (UniPCMultistepScheduler,)
SCREAMING_SNAKE_CASE_ = (("num_inference_steps", 2_5),)
def a_ ( self, **lowerCAmelCase__) -> List[Any]:
snake_case_ = {
'num_train_timesteps': 1000,
'beta_start': 0.0001,
'beta_end': 0.02,
'beta_schedule': 'linear',
'solver_order': 2,
'solver_type': 'bh2',
}
config.update(**lowerCAmelCase__)
return config
def a_ ( self, lowerCAmelCase__=0, **lowerCAmelCase__) -> Any:
snake_case_ = dict(self.forward_default_kwargs)
snake_case_ = kwargs.pop('num_inference_steps', lowerCAmelCase__)
snake_case_ = self.dummy_sample
snake_case_ = 0.1 * sample
snake_case_ = [residual + 0.2, residual + 0.15, residual + 0.10]
for scheduler_class in self.scheduler_classes:
snake_case_ = self.get_scheduler_config(**lowerCAmelCase__)
snake_case_ = scheduler_class(**lowerCAmelCase__)
scheduler.set_timesteps(lowerCAmelCase__)
# copy over dummy past residuals
snake_case_ = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(lowerCAmelCase__)
snake_case_ = scheduler_class.from_pretrained(lowerCAmelCase__)
new_scheduler.set_timesteps(lowerCAmelCase__)
# copy over dummy past residuals
snake_case_ = dummy_past_residuals[: new_scheduler.config.solver_order]
snake_case_ , snake_case_ = sample, sample
for t in range(lowerCAmelCase__, time_step + scheduler.config.solver_order + 1):
snake_case_ = scheduler.step(lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__, **lowerCAmelCase__).prev_sample
snake_case_ = new_scheduler.step(lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__, **lowerCAmelCase__).prev_sample
assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
def a_ ( self, lowerCAmelCase__=0, **lowerCAmelCase__) -> Dict:
snake_case_ = dict(self.forward_default_kwargs)
snake_case_ = kwargs.pop('num_inference_steps', lowerCAmelCase__)
snake_case_ = self.dummy_sample
snake_case_ = 0.1 * sample
snake_case_ = [residual + 0.2, residual + 0.15, residual + 0.10]
for scheduler_class in self.scheduler_classes:
snake_case_ = self.get_scheduler_config()
snake_case_ = scheduler_class(**lowerCAmelCase__)
scheduler.set_timesteps(lowerCAmelCase__)
# copy over dummy past residuals (must be after setting timesteps)
snake_case_ = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(lowerCAmelCase__)
snake_case_ = scheduler_class.from_pretrained(lowerCAmelCase__)
# copy over dummy past residuals
new_scheduler.set_timesteps(lowerCAmelCase__)
# copy over dummy past residual (must be after setting timesteps)
snake_case_ = dummy_past_residuals[: new_scheduler.config.solver_order]
snake_case_ = scheduler.step(lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__, **lowerCAmelCase__).prev_sample
snake_case_ = new_scheduler.step(lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__, **lowerCAmelCase__).prev_sample
assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
def a_ ( self, lowerCAmelCase__=None, **lowerCAmelCase__) -> List[Any]:
if scheduler is None:
snake_case_ = self.scheduler_classes[0]
snake_case_ = self.get_scheduler_config(**lowerCAmelCase__)
snake_case_ = scheduler_class(**lowerCAmelCase__)
snake_case_ = self.scheduler_classes[0]
snake_case_ = self.get_scheduler_config(**lowerCAmelCase__)
snake_case_ = scheduler_class(**lowerCAmelCase__)
snake_case_ = 10
snake_case_ = self.dummy_model()
snake_case_ = self.dummy_sample_deter
scheduler.set_timesteps(lowerCAmelCase__)
for i, t in enumerate(scheduler.timesteps):
snake_case_ = model(lowerCAmelCase__, lowerCAmelCase__)
snake_case_ = scheduler.step(lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__).prev_sample
return sample
def a_ ( self) -> List[str]:
snake_case_ = dict(self.forward_default_kwargs)
snake_case_ = kwargs.pop('num_inference_steps', lowerCAmelCase__)
for scheduler_class in self.scheduler_classes:
snake_case_ = self.get_scheduler_config()
snake_case_ = scheduler_class(**lowerCAmelCase__)
snake_case_ = self.dummy_sample
snake_case_ = 0.1 * sample
if num_inference_steps is not None and hasattr(lowerCAmelCase__, 'set_timesteps'):
scheduler.set_timesteps(lowerCAmelCase__)
elif num_inference_steps is not None and not hasattr(lowerCAmelCase__, 'set_timesteps'):
snake_case_ = num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
snake_case_ = [residual + 0.2, residual + 0.15, residual + 0.10]
snake_case_ = dummy_past_residuals[: scheduler.config.solver_order]
snake_case_ = scheduler.timesteps[5]
snake_case_ = scheduler.timesteps[6]
snake_case_ = scheduler.step(lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__, **lowerCAmelCase__).prev_sample
snake_case_ = scheduler.step(lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__, **lowerCAmelCase__).prev_sample
self.assertEqual(output_a.shape, sample.shape)
self.assertEqual(output_a.shape, output_a.shape)
def a_ ( self) -> Tuple:
# make sure that iterating over schedulers with same config names gives same results
# for defaults
snake_case_ = UniPCMultistepScheduler(**self.get_scheduler_config())
snake_case_ = self.full_loop(scheduler=lowerCAmelCase__)
snake_case_ = torch.mean(torch.abs(lowerCAmelCase__))
assert abs(result_mean.item() - 0.2464) < 1e-3
snake_case_ = DPMSolverSinglestepScheduler.from_config(scheduler.config)
snake_case_ = DEISMultistepScheduler.from_config(scheduler.config)
snake_case_ = DPMSolverMultistepScheduler.from_config(scheduler.config)
snake_case_ = UniPCMultistepScheduler.from_config(scheduler.config)
snake_case_ = self.full_loop(scheduler=lowerCAmelCase__)
snake_case_ = torch.mean(torch.abs(lowerCAmelCase__))
assert abs(result_mean.item() - 0.2464) < 1e-3
def a_ ( self) -> str:
for timesteps in [25, 50, 100, 999, 1000]:
self.check_over_configs(num_train_timesteps=lowerCAmelCase__)
def a_ ( self) -> List[Any]:
self.check_over_configs(thresholding=lowerCAmelCase__)
for order in [1, 2, 3]:
for solver_type in ["bh1", "bh2"]:
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
thresholding=lowerCAmelCase__, prediction_type=lowerCAmelCase__, sample_max_value=lowerCAmelCase__, solver_order=lowerCAmelCase__, solver_type=lowerCAmelCase__, )
def a_ ( self) -> List[str]:
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=lowerCAmelCase__)
def a_ ( self) -> Tuple:
for solver_type in ["bh1", "bh2"]:
for order in [1, 2, 3]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
solver_order=lowerCAmelCase__, solver_type=lowerCAmelCase__, prediction_type=lowerCAmelCase__, )
snake_case_ = self.full_loop(
solver_order=lowerCAmelCase__, solver_type=lowerCAmelCase__, prediction_type=lowerCAmelCase__, )
assert not torch.isnan(lowerCAmelCase__).any(), "Samples have nan numbers"
def a_ ( self) -> Any:
self.check_over_configs(lower_order_final=lowerCAmelCase__)
self.check_over_configs(lower_order_final=lowerCAmelCase__)
def a_ ( self) -> int:
for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1000]:
self.check_over_forward(num_inference_steps=lowerCAmelCase__, time_step=0)
def a_ ( self) -> Tuple:
snake_case_ = self.full_loop()
snake_case_ = torch.mean(torch.abs(lowerCAmelCase__))
assert abs(result_mean.item() - 0.2464) < 1e-3
def a_ ( self) -> str:
snake_case_ = self.full_loop(prediction_type='v_prediction')
snake_case_ = torch.mean(torch.abs(lowerCAmelCase__))
assert abs(result_mean.item() - 0.1014) < 1e-3
def a_ ( self) -> Dict:
snake_case_ = self.scheduler_classes[0]
snake_case_ = self.get_scheduler_config(thresholding=lowerCAmelCase__, dynamic_thresholding_ratio=0)
snake_case_ = scheduler_class(**lowerCAmelCase__)
snake_case_ = 10
snake_case_ = self.dummy_model()
snake_case_ = self.dummy_sample_deter.half()
scheduler.set_timesteps(lowerCAmelCase__)
for i, t in enumerate(scheduler.timesteps):
snake_case_ = model(lowerCAmelCase__, lowerCAmelCase__)
snake_case_ = scheduler.step(lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__).prev_sample
assert sample.dtype == torch.floataa
def a_ ( self, **lowerCAmelCase__) -> str:
for scheduler_class in self.scheduler_classes:
snake_case_ = self.get_scheduler_config(**lowerCAmelCase__)
snake_case_ = scheduler_class(**lowerCAmelCase__)
scheduler.set_timesteps(scheduler.config.num_train_timesteps)
assert len(scheduler.timesteps.unique()) == scheduler.num_inference_steps
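

# A standalone usage sketch of the scheduler API these tests exercise (assumes
# torch and diffusers are installed; the zero "model output" is a stand-in for
# a real denoising model's prediction):
if __name__ == "__main__":
    sample = torch.randn(1, 3, 8, 8)
    demo_scheduler = UniPCMultistepScheduler(num_train_timesteps=1000, solver_order=2, solver_type="bh2")
    demo_scheduler.set_timesteps(10)
    for t in demo_scheduler.timesteps:
        residual = torch.zeros_like(sample)  # stand-in for a model prediction
        sample = demo_scheduler.step(residual, t, sample).prev_sample
    print(sample.shape)  # torch.Size([1, 3, 8, 8])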
| 69 |
'''simple docstring'''
import os
from datetime import datetime as dt
from github import Github
__SCREAMING_SNAKE_CASE :str = [
'''good first issue''',
'''feature request''',
'''wip''',
]
def main() -> None:
    g = Github(os.environ["GITHUB_TOKEN"])
    repo = g.get_repo("huggingface/accelerate")
    open_issues = repo.get_issues(state="open")

    for issue in open_issues:
        comments = sorted(issue.get_comments(), key=lambda i: i.created_at, reverse=True)
        last_comment = comments[0] if len(comments) > 0 else None
        current_time = dt.utcnow()
        days_since_updated = (current_time - issue.updated_at).days
        days_since_creation = (current_time - issue.created_at).days
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and days_since_updated > 7
and days_since_creation >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Close issue since it has been 7 days of inactivity since bot mention.
issue.edit(state="closed" )
elif (
days_since_updated > 23
and days_since_creation >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Add stale comment
issue.create_comment(
"This issue has been automatically marked as stale because it has not had "
"recent activity. If you think this still needs to be addressed "
"please comment on this thread.\n\nPlease note that issues that do not follow the "
"[contributing guidelines](https://github.com/huggingface/accelerate/blob/main/CONTRIBUTING.md) "
"are likely to be ignored." )
if __name__ == "__main__":
main()
| 22 | 0 |
'''simple docstring'''
import argparse
import json
import logging
import os
import sys
from unittest.mock import patch
from transformers.testing_utils import TestCasePlus, get_gpu_count, slow
SRC_DIRS = [
os.path.join(os.path.dirname(__file__), dirname)
for dirname in [
'''text-classification''',
'''language-modeling''',
'''summarization''',
'''token-classification''',
'''question-answering''',
]
]
sys.path.extend(SRC_DIRS)
if SRC_DIRS is not None:
import run_clm_flax
import run_flax_glue
import run_flax_ner
import run_mlm_flax
import run_qa
import run_summarization_flax
    import run_t5_mlm_flax
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()


def get_setup_file():
    parser = argparse.ArgumentParser()
    parser.add_argument("-f")
    args = parser.parse_args()
    return args.f


def get_results(output_dir, split="eval"):
    path = os.path.join(output_dir, f"{split}_results.json")
    if os.path.exists(path):
        with open(path, "r") as f:
            return json.load(f)
    raise ValueError(f"can't find {path}")


stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class ExamplesTests(TestCasePlus):
    def test_run_glue(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"\n run_glue.py\n --model_name_or_path distilbert-base-uncased\n --output_dir {tmp_dir}\n --train_file ./tests/fixtures/tests_samples/MRPC/train.csv\n --validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --learning_rate=1e-4\n --eval_steps=2\n --warmup_steps=2\n --seed=42\n --max_seq_length=128\n ".split()

        with patch.object(sys, "argv", testargs):
            run_flax_glue.main()
            result = get_results(tmp_dir)
            self.assertGreaterEqual(result["eval_accuracy"], 0.75)

    @slow
    def test_run_clm(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"\n run_clm_flax.py\n --model_name_or_path distilgpt2\n --train_file ./tests/fixtures/sample_text.txt\n --validation_file ./tests/fixtures/sample_text.txt\n --do_train\n --do_eval\n --block_size 128\n --per_device_train_batch_size 4\n --per_device_eval_batch_size 4\n --num_train_epochs 2\n --logging_steps 2 --eval_steps 2\n --output_dir {tmp_dir}\n --overwrite_output_dir\n ".split()

        with patch.object(sys, "argv", testargs):
            run_clm_flax.main()
            result = get_results(tmp_dir)
            self.assertLess(result["eval_perplexity"], 100)

    @slow
    def test_run_summarization(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"\n run_summarization.py\n --model_name_or_path t5-small\n --train_file tests/fixtures/tests_samples/xsum/sample.json\n --validation_file tests/fixtures/tests_samples/xsum/sample.json\n --test_file tests/fixtures/tests_samples/xsum/sample.json\n --output_dir {tmp_dir}\n --overwrite_output_dir\n --num_train_epochs=3\n --warmup_steps=8\n --do_train\n --do_eval\n --do_predict\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --predict_with_generate\n ".split()

        with patch.object(sys, "argv", testargs):
            run_summarization_flax.main()
            result = get_results(tmp_dir, split="test")
            self.assertGreaterEqual(result["test_rouge1"], 10)
            self.assertGreaterEqual(result["test_rouge2"], 2)
            self.assertGreaterEqual(result["test_rougeL"], 7)
            self.assertGreaterEqual(result["test_rougeLsum"], 7)

    @slow
    def test_run_mlm(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"\n run_mlm.py\n --model_name_or_path distilroberta-base\n --train_file ./tests/fixtures/sample_text.txt\n --validation_file ./tests/fixtures/sample_text.txt\n --output_dir {tmp_dir}\n --overwrite_output_dir\n --max_seq_length 128\n --per_device_train_batch_size 4\n --per_device_eval_batch_size 4\n --logging_steps 2 --eval_steps 2\n --do_train\n --do_eval\n --num_train_epochs=1\n ".split()

        with patch.object(sys, "argv", testargs):
            run_mlm_flax.main()
            result = get_results(tmp_dir)
            self.assertLess(result["eval_perplexity"], 42)

    @slow
    def test_run_t5_mlm(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"\n run_t5_mlm_flax.py\n --model_name_or_path t5-small\n --train_file ./tests/fixtures/sample_text.txt\n --validation_file ./tests/fixtures/sample_text.txt\n --do_train\n --do_eval\n --max_seq_length 128\n --per_device_train_batch_size 4\n --per_device_eval_batch_size 4\n --num_train_epochs 2\n --logging_steps 2 --eval_steps 2\n --output_dir {tmp_dir}\n --overwrite_output_dir\n ".split()

        with patch.object(sys, "argv", testargs):
            run_t5_mlm_flax.main()
            result = get_results(tmp_dir)
            self.assertGreaterEqual(result["eval_accuracy"], 0.42)

    @slow
    def test_run_ner(self):
        # with so little data distributed training needs more epochs to get the score on par with 0/1 gpu
        epochs = 7 if get_gpu_count() > 1 else 2

        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"\n run_flax_ner.py\n --model_name_or_path bert-base-uncased\n --train_file tests/fixtures/tests_samples/conll/sample.json\n --validation_file tests/fixtures/tests_samples/conll/sample.json\n --output_dir {tmp_dir}\n --overwrite_output_dir\n --do_train\n --do_eval\n --warmup_steps=2\n --learning_rate=2e-4\n --logging_steps 2 --eval_steps 2\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=2\n --num_train_epochs={epochs}\n --seed 7\n ".split()

        with patch.object(sys, "argv", testargs):
            run_flax_ner.main()
            result = get_results(tmp_dir)
            self.assertGreaterEqual(result["eval_accuracy"], 0.75)
            self.assertGreaterEqual(result["eval_f1"], 0.3)

    @slow
    def test_run_qa(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"\n run_qa.py\n --model_name_or_path bert-base-uncased\n --version_2_with_negative\n --train_file tests/fixtures/tests_samples/SQUAD/sample.json\n --validation_file tests/fixtures/tests_samples/SQUAD/sample.json\n --output_dir {tmp_dir}\n --overwrite_output_dir\n --num_train_epochs=3\n --warmup_steps=2\n --do_train\n --do_eval\n --logging_steps 2 --eval_steps 2\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n ".split()

        with patch.object(sys, "argv", testargs):
            run_qa.main()
            result = get_results(tmp_dir)
            self.assertGreaterEqual(result["eval_f1"], 30)
            self.assertGreaterEqual(result["eval_exact"], 30)
| 70 |
'''simple docstring'''
import os
import pytest
import yaml
from datasets.features.features import Features, Value
from datasets.info import DatasetInfo, DatasetInfosDict
@pytest.mark.parametrize(
"files" , [
["full:README.md", "dataset_infos.json"],
["empty:README.md", "dataset_infos.json"],
["dataset_infos.json"],
["full:README.md"],
] , )
def test_from_dir(files, tmp_path_factory):
    dataset_infos_dir = tmp_path_factory.mktemp("dset_infos_dir")
    if "full:README.md" in files:
        with open(dataset_infos_dir / "README.md", "w") as f:
            f.write("---\ndataset_info:\n dataset_size: 42\n---")
    if "empty:README.md" in files:
        with open(dataset_infos_dir / "README.md", "w") as f:
            f.write("")
    # we want to support dataset_infos.json for backward compatibility
    if "dataset_infos.json" in files:
        with open(dataset_infos_dir / "dataset_infos.json", "w") as f:
            f.write('{"default": {"dataset_size": 42}}')
    dataset_infos = DatasetInfosDict.from_directory(dataset_infos_dir)
    assert dataset_infos
    assert dataset_infos["default"].dataset_size == 42
@pytest.mark.parametrize(
"dataset_info" , [
DatasetInfo(),
DatasetInfo(
description="foo" , features=Features({"a": Value("int32" )} ) , builder_name="builder" , config_name="config" , version="1.0.0" , splits=[{"name": "train"}] , download_size=42 , ),
] , )
def test_dataset_info_dump_and_reload(tmp_path, dataset_info: DatasetInfo):
    tmp_path = str(tmp_path)
    dataset_info.write_to_directory(tmp_path)
    reloaded = DatasetInfo.from_directory(tmp_path)
    assert dataset_info == reloaded
    assert os.path.exists(os.path.join(tmp_path, "dataset_info.json"))
def test_dataset_info_to_yaml_dict():
    dataset_info = DatasetInfo(
        description="foo",
        citation="bar",
        homepage="https://foo.bar",
        license="CC0",
        features=Features({"a": Value("int32")}),
        post_processed={},
        supervised_keys=(),
        task_templates=[],
        builder_name="builder",
        config_name="config",
        version="1.0.0",
        splits=[{"name": "train", "num_examples": 42}],
        download_checksums={},
        download_size=1337,
        post_processing_size=442,
        dataset_size=1234,
        size_in_bytes=1337 + 442 + 1234,
    )
    dataset_info_yaml_dict = dataset_info._to_yaml_dict()
    assert sorted(dataset_info_yaml_dict) == sorted(DatasetInfo._INCLUDED_INFO_IN_YAML)
    for key in DatasetInfo._INCLUDED_INFO_IN_YAML:
        assert key in dataset_info_yaml_dict
        assert isinstance(dataset_info_yaml_dict[key], (list, dict, int, str))
    dataset_info_yaml = yaml.safe_dump(dataset_info_yaml_dict)
    dataset_info_yaml_dict_reloaded = yaml.safe_load(dataset_info_yaml)
    assert dataset_info_yaml_dict == dataset_info_yaml_dict_reloaded
def test_dataset_info_to_yaml_dict_empty():
    dataset_info = DatasetInfo()
    dataset_info_yaml_dict = dataset_info._to_yaml_dict()
    assert dataset_info_yaml_dict == {}
@pytest.mark.parametrize(
"dataset_infos_dict" , [
DatasetInfosDict(),
DatasetInfosDict({"default": DatasetInfo()} ),
DatasetInfosDict({"my_config_name": DatasetInfo()} ),
DatasetInfosDict(
{
"default": DatasetInfo(
description="foo" , features=Features({"a": Value("int32" )} ) , builder_name="builder" , config_name="config" , version="1.0.0" , splits=[{"name": "train"}] , download_size=42 , )
} ),
DatasetInfosDict(
{
"v1": DatasetInfo(dataset_size=42 ),
"v2": DatasetInfo(dataset_size=1337 ),
} ),
] , )
def test_dataset_infos_dict_dump_and_reload(tmp_path, dataset_infos_dict: DatasetInfosDict):
    tmp_path = str(tmp_path)
    dataset_infos_dict.write_to_directory(tmp_path)
    reloaded = DatasetInfosDict.from_directory(tmp_path)

    # the config_name of the dataset_infos_dict take over the attribute
    for config_name, dataset_info in dataset_infos_dict.items():
        dataset_info.config_name = config_name
        # the yaml representation doesn't include fields like description or citation
        # so we just test that we can recover what we can from the yaml
        dataset_infos_dict[config_name] = DatasetInfo._from_yaml_dict(dataset_info._to_yaml_dict())
    assert dataset_infos_dict == reloaded

    if dataset_infos_dict:
        assert os.path.exists(os.path.join(tmp_path, "README.md"))
| 22 | 0 |
# Logistic Regression from scratch
# In[62]:
# In[63]:
# importing all the required libraries
import numpy as np
from matplotlib import pyplot as plt
from sklearn import datasets
def sigmoid_function(z):
    return 1 / (1 + np.exp(-z))


def cost_function(h, y):
    return (-y * np.log(h) - (1 - y) * np.log(1 - h)).mean()


def log_likelihood(x, y, weights):
    scores = np.dot(x, weights)
    return np.sum(y * scores - np.log(1 + np.exp(scores)))


# here alpha is the learning rate, x is the feature matrix and y is the target vector
def logistic_reg(alpha, x, y, max_iterations=70_000):
    theta = np.zeros(x.shape[1])

    for iterations in range(max_iterations):
        z = np.dot(x, theta)
        h = sigmoid_function(z)
        gradient = np.dot(x.T, h - y) / y.size
        theta = theta - alpha * gradient  # updating the weights
        z = np.dot(x, theta)
        h = sigmoid_function(z)
        j = cost_function(h, y)
        if iterations % 100 == 0:
            print(f"loss: {j} \t")  # printing the loss after every 100 iterations
    return theta
# In[68]:
if __name__ == "__main__":
    iris = datasets.load_iris()
    x = iris.data[:, :2]
    y = (iris.target != 0) * 1
    alpha = 0.1
    theta = logistic_reg(alpha, x, y, max_iterations=70_000)
    print("theta: ", theta)  # printing the theta i.e our weights vector

    def predict_prob(x):
        return sigmoid_function(
            np.dot(x, theta)
        )  # predicting the value of probability from the logistic regression algorithm
plt.figure(figsize=(10, 6))
plt.scatter(x[y == 0][:, 0], x[y == 0][:, 1], color='''b''', label='''0''')
plt.scatter(x[y == 1][:, 0], x[y == 1][:, 1], color='''r''', label='''1''')
    (x1_min, x1_max) = (x[:, 0].min(), x[:, 0].max())
    (x2_min, x2_max) = (x[:, 1].min(), x[:, 1].max())
    (xx1, xx2) = np.meshgrid(np.linspace(x1_min, x1_max), np.linspace(x2_min, x2_max))
    grid = np.c_[xx1.ravel(), xx2.ravel()]
    probs = predict_prob(grid).reshape(xx1.shape)
    plt.contour(xx1, xx2, probs, [0.5], linewidths=1, colors="black")
plt.legend()
plt.show()
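
# A small follow-up sketch: class labels come from thresholding the predicted
# probabilities at 0.5, which mirrors the 0.5-level contour drawn above:
#
#     predictions = (predict_prob(x) >= 0.5).astype(int)
#     accuracy = (predictions == y).mean()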
| 71 |
'''simple docstring'''
def reverse_long_words(sentence: str) -> str:
    """
    Reverse all words that are longer than 4 characters in a sentence.

    >>> reverse_long_words("Hey wollef sroirraw")
    'Hey fellow warriors'
    """
    return " ".join(
        "".join(word[::-1]) if len(word) > 4 else word for word in sentence.split()
    )
if __name__ == "__main__":
import doctest
doctest.testmod()
print(reverse_long_words('''Hey wollef sroirraw'''))
| 22 | 0 |
"""simple docstring"""
from __future__ import annotations
class __snake_case :
def __init__( self : Union[str, Any] , __lowerCAmelCase : str , __lowerCAmelCase : str ):
"""simple docstring"""
_lowerCamelCase , _lowerCamelCase : Any = text, pattern
_lowerCamelCase , _lowerCamelCase : Optional[int] = len(__lowerCAmelCase ), len(__lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] , __lowerCAmelCase : str ):
"""simple docstring"""
for i in range(self.patLen - 1 , -1 , -1 ):
if char == self.pattern[i]:
return i
return -1
def SCREAMING_SNAKE_CASE ( self : int , __lowerCAmelCase : int ):
"""simple docstring"""
for i in range(self.patLen - 1 , -1 , -1 ):
if self.pattern[i] != self.text[current_pos + i]:
return current_pos + i
return -1
def SCREAMING_SNAKE_CASE ( self : Any ):
"""simple docstring"""
_lowerCamelCase : str = []
for i in range(self.textLen - self.patLen + 1 ):
_lowerCamelCase : str = self.mismatch_in_text(__lowerCAmelCase )
if mismatch_index == -1:
positions.append(__lowerCAmelCase )
else:
_lowerCamelCase : str = self.match_in_pattern(self.text[mismatch_index] )
_lowerCamelCase : str = (
mismatch_index - match_index
) # shifting index lgtm [py/multiple-definition]
return positions
lowerCAmelCase__ = '''ABAABA'''
lowerCAmelCase__ = '''AB'''
lowerCAmelCase__ = BoyerMooreSearch(text, pattern)
lowerCAmelCase__ = bms.bad_character_heuristic()
if len(positions) == 0:
print('''No match found''')
else:
print('''Pattern found in following positions: ''')
print(positions)
| 72 |
'''simple docstring'''
def harmonic_series(n_term: str) -> list:
    """
    Generate the harmonic series up to the n-th term as a list of strings.

    >>> harmonic_series("3")
    ['1', '1/2', '1/3']
    """
    if n_term == "":
        return []
    series: list = []
    for temp in range(int(n_term)):
        series.append(f"1/{temp + 1}" if series else "1")
    return series
if __name__ == "__main__":
    nth_term = input("Enter the last number (nth term) of the Harmonic Series")
print('''Formula of Harmonic Series => 1+1/2+1/3 ..... 1/n''')
print(harmonic_series(nth_term))
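
# A quick numeric follow-up (a sketch): the n-th partial sum grows like
# ln(n) + γ, with γ ≈ 0.5772 the Euler–Mascheroni constant. For n = 1000:
#
#     from fractions import Fraction
#     total = float(sum(Fraction(1, k) for k in range(1, 1001)))  # ≈ 7.4855
#     import math; math.log(1000) + 0.5772                        # ≈ 7.4849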
| 22 | 0 |
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import LevitImageProcessor
class LevitImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_center_crop=True,
        crop_size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
    ):
        size = size if size is not None else {"shortest_edge": 18}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std

    def prepare_image_processor_dict(self):
        return {
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_normalize": self.do_normalize,
            "do_resize": self.do_resize,
            "do_center_crop": self.do_center_crop,
            "size": self.size,
            "crop_size": self.crop_size,
        }
@require_torch
@require_vision
class LevitImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = LevitImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = LevitImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "size"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 18})
        self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {"shortest_edge": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})

    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
| 73 |
'''simple docstring'''
import unittest
from transformers import PegasusTokenizer, PegasusTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece_no_bos.model")
@require_sentencepiece
@require_tokenizers
class PegasusTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = PegasusTokenizer
    rust_tokenizer_class = PegasusTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = PegasusTokenizer(SAMPLE_VOCAB)
        tokenizer.save_pretrained(self.tmpdirname)

    @cached_property
    def _large_tokenizer(self):
        return PegasusTokenizer.from_pretrained("google/pegasus-large")

    def get_tokenizer(self, **kwargs) -> PegasusTokenizer:
        return PegasusTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        return ("This is a test", "This is a test")
def lowercase ( self : Optional[int] ):
_UpperCAmelCase = "</s>"
_UpperCAmelCase = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(snake_case_ ) , snake_case_ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(snake_case_ ) , snake_case_ )
def lowercase ( self : Union[str, Any] ):
_UpperCAmelCase = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , "<pad>" )
self.assertEqual(vocab_keys[1] , "</s>" )
self.assertEqual(vocab_keys[-1] , "v" )
self.assertEqual(len(snake_case_ ) , 1_1_0_3 )
def lowercase ( self : Any ):
self.assertEqual(self.get_tokenizer().vocab_size , 1_1_0_3 )
def lowercase ( self : List[Any] ):
_UpperCAmelCase = self.rust_tokenizer_class.from_pretrained(self.tmpdirname )
_UpperCAmelCase = self.tokenizer_class.from_pretrained(self.tmpdirname )
_UpperCAmelCase = (
"Let's see which <unk> is the better <unk_token_11> one <mask_1> It seems like this <mask_2> was important"
" </s> <pad> <pad> <pad>"
)
_UpperCAmelCase = rust_tokenizer([raw_input_str] , return_tensors=snake_case_ , add_special_tokens=snake_case_ ).input_ids[0]
_UpperCAmelCase = py_tokenizer([raw_input_str] , return_tensors=snake_case_ , add_special_tokens=snake_case_ ).input_ids[0]
self.assertListEqual(snake_case_ , snake_case_ )
def lowercase ( self : Tuple ):
_UpperCAmelCase = self._large_tokenizer
# <mask_1> masks whole sentence while <mask_2> masks single word
_UpperCAmelCase = "<mask_1> To ensure a <mask_2> flow of bank resolutions."
_UpperCAmelCase = [2, 4_1_3, 6_1_5, 1_1_4, 3, 1_9_7_1, 1_1_3, 1_6_7_9, 1_0_7_1_0, 1_0_7, 1]
_UpperCAmelCase = tokenizer([raw_input_str] , return_tensors=snake_case_ ).input_ids[0]
self.assertListEqual(snake_case_ , snake_case_ )
def lowercase ( self : Union[str, Any] ):
_UpperCAmelCase = self._large_tokenizer
# The tracebacks for the following asserts are **better** without messages or self.assertEqual
assert tokenizer.vocab_size == 9_6_1_0_3
assert tokenizer.pad_token_id == 0
assert tokenizer.eos_token_id == 1
assert tokenizer.offset == 1_0_3
assert tokenizer.unk_token_id == tokenizer.offset + 2 == 1_0_5
assert tokenizer.unk_token == "<unk>"
assert tokenizer.model_max_length == 1_0_2_4
_UpperCAmelCase = "To ensure a smooth flow of bank resolutions."
_UpperCAmelCase = [4_1_3, 6_1_5, 1_1_4, 2_2_9_1, 1_9_7_1, 1_1_3, 1_6_7_9, 1_0_7_1_0, 1_0_7, 1]
_UpperCAmelCase = tokenizer([raw_input_str] , return_tensors=snake_case_ ).input_ids[0]
self.assertListEqual(snake_case_ , snake_case_ )
assert tokenizer.convert_ids_to_tokens([0, 1, 2, 3] ) == ["<pad>", "</s>", "<mask_1>", "<mask_2>"]
@require_torch
def lowercase ( self : int ):
_UpperCAmelCase = ["This is going to be way too long." * 1_5_0, "short example"]
_UpperCAmelCase = ["not super long but more than 5 tokens", "tiny"]
_UpperCAmelCase = self._large_tokenizer(snake_case_ , padding=snake_case_ , truncation=snake_case_ , return_tensors="pt" )
_UpperCAmelCase = self._large_tokenizer(
text_target=snake_case_ , max_length=5 , padding=snake_case_ , truncation=snake_case_ , return_tensors="pt" )
assert batch.input_ids.shape == (2, 1_0_2_4)
assert batch.attention_mask.shape == (2, 1_0_2_4)
assert targets["input_ids"].shape == (2, 5)
assert len(snake_case_ ) == 2 # input_ids, attention_mask.
@slow
def lowercase ( self : Dict ):
# fmt: off
_UpperCAmelCase = {"input_ids": [[3_8_9_7_9, 1_4_3, 1_8_4_8_5, 6_0_6, 1_3_0, 2_6_6_6_9, 8_7_6_8_6, 1_2_1, 5_4_1_8_9, 1_1_2_9, 1_1_1, 2_6_6_6_9, 8_7_6_8_6, 1_2_1, 9_1_1_4, 1_4_7_8_7, 1_2_1, 1_3_2_4_9, 1_5_8, 5_9_2, 9_5_6, 1_2_1, 1_4_6_2_1, 3_1_5_7_6, 1_4_3, 6_2_6_1_3, 1_0_8, 9_6_8_8, 9_3_0, 4_3_4_3_0, 1_1_5_6_2, 6_2_6_1_3, 3_0_4, 1_0_8, 1_1_4_4_3, 8_9_7, 1_0_8, 9_3_1_4, 1_7_4_1_5, 6_3_3_9_9, 1_0_8, 1_1_4_4_3, 7_6_1_4, 1_8_3_1_6, 1_1_8, 4_2_8_4, 7_1_4_8, 1_2_4_3_0, 1_4_3, 1_4_0_0, 2_5_7_0_3, 1_5_8, 1_1_1, 4_2_8_4, 7_1_4_8, 1_1_7_7_2, 1_4_3, 2_1_2_9_7, 1_0_6_4, 1_5_8, 1_2_2, 2_0_4, 3_5_0_6, 1_7_5_4, 1_1_3_3, 1_4_7_8_7, 1_5_8_1, 1_1_5, 3_3_2_2_4, 4_4_8_2, 1_1_1, 1_3_5_5, 1_1_0, 2_9_1_7_3, 3_1_7, 5_0_8_3_3, 1_0_8, 2_0_1_4_7, 9_4_6_6_5, 1_1_1, 7_7_1_9_8, 1_0_7, 1], [1_1_0, 6_2_6_1_3, 1_1_7, 6_3_8, 1_1_2, 1_1_3_3, 1_2_1, 2_0_0_9_8, 1_3_5_5, 7_9_0_5_0, 1_3_8_7_2, 1_3_5, 1_5_9_6, 5_3_5_4_1, 1_3_5_2, 1_4_1, 1_3_0_3_9, 5_5_4_2, 1_2_4, 3_0_2, 5_1_8, 1_1_1, 2_6_8, 2_9_5_6, 1_1_5, 1_4_9, 4_4_2_7, 1_0_7, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1_3_9, 1_2_3_5, 2_7_9_9, 1_8_2_8_9, 1_7_7_8_0, 2_0_4, 1_0_9, 9_4_7_4, 1_2_9_6, 1_0_7, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=snake_case_ , model_name="google/bigbird-pegasus-large-arxiv" , revision="ba85d0851d708441f91440d509690f1ab6353415" , )
@require_sentencepiece
@require_tokenizers
class BigBirdPegasusTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = PegasusTokenizer
    rust_tokenizer_class = PegasusTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = PegasusTokenizer(SAMPLE_VOCAB, offset=0, mask_token_sent=None, mask_token="[MASK]")
        tokenizer.save_pretrained(self.tmpdirname)

    @cached_property
    def _large_tokenizer(self):
        return PegasusTokenizer.from_pretrained("google/bigbird-pegasus-large-arxiv")

    def get_tokenizer(self, **kwargs) -> PegasusTokenizer:
        return PegasusTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        return ("This is a test", "This is a test")
def lowercase ( self : List[str] ):
_UpperCAmelCase = self.rust_tokenizer_class.from_pretrained(self.tmpdirname )
_UpperCAmelCase = self.tokenizer_class.from_pretrained(self.tmpdirname )
_UpperCAmelCase = (
"Let's see which <unk> is the better <unk_token> one [MASK] It seems like this [MASK] was important </s>"
" <pad> <pad> <pad>"
)
_UpperCAmelCase = rust_tokenizer([raw_input_str] , return_tensors=snake_case_ , add_special_tokens=snake_case_ ).input_ids[0]
_UpperCAmelCase = py_tokenizer([raw_input_str] , return_tensors=snake_case_ , add_special_tokens=snake_case_ ).input_ids[0]
self.assertListEqual(snake_case_ , snake_case_ )
@require_torch
def lowercase ( self : Tuple ):
_UpperCAmelCase = ["This is going to be way too long." * 1_0_0_0, "short example"]
_UpperCAmelCase = ["not super long but more than 5 tokens", "tiny"]
_UpperCAmelCase = self._large_tokenizer(snake_case_ , padding=snake_case_ , truncation=snake_case_ , return_tensors="pt" )
_UpperCAmelCase = self._large_tokenizer(
text_target=snake_case_ , max_length=5 , padding=snake_case_ , truncation=snake_case_ , return_tensors="pt" )
assert batch.input_ids.shape == (2, 4_0_9_6)
assert batch.attention_mask.shape == (2, 4_0_9_6)
assert targets["input_ids"].shape == (2, 5)
assert len(snake_case_ ) == 2 # input_ids, attention_mask.
def lowercase ( self : Union[str, Any] ):
_UpperCAmelCase = (
"This is an example string that is used to test the original TF implementation against the HF"
" implementation"
)
_UpperCAmelCase = self._large_tokenizer(snake_case_ ).input_ids
self.assertListEqual(
snake_case_ , [1_8_2, 1_1_7, 1_4_2, 5_8_7, 4_2_1_1, 1_2_0, 1_1_7, 2_6_3, 1_1_2, 8_0_4, 1_0_9, 8_5_6, 2_5_0_1_6, 3_1_3_7, 4_6_4, 1_0_9, 2_6_9_5_5, 3_1_3_7, 1] , )
| 22 | 0 |
"""simple docstring"""
from math import factorial
_lowercase = {str(digit): factorial(digit) for digit in range(10)}
def _snake_case ( snake_case__ : int ):
if not isinstance(snake_case__ , snake_case__ ):
raise TypeError('Parameter number must be int' )
if number < 0:
raise ValueError('Parameter number must be greater than or equal to 0' )
# Converts number in string to iterate on its digits and adds its factorial.
return sum(DIGIT_FACTORIAL[digit] for digit in str(snake_case__ ) )
def _snake_case ( snake_case__ : int = 60 , snake_case__ : int = 100_0000 ):
if not isinstance(snake_case__ , snake_case__ ) or not isinstance(snake_case__ , snake_case__ ):
raise TypeError('Parameters chain_length and number_limit must be int' )
if chain_length <= 0 or number_limit <= 0:
raise ValueError(
'Parameters chain_length and number_limit must be greater than 0' )
# the counter for the chains with the exact desired length
A = 0
# the cached sizes of the previous chains
A = {}
for start_chain_element in range(1 , snake_case__ ):
# The temporary set will contain the elements of the chain
A = set()
A = 0
# Stop computing the chain when you find a cached size, a repeating item or the
# length is greater then the desired one.
A = start_chain_element
while (
chain_element not in chain_sets_lengths
and chain_element not in chain_set
and chain_set_length <= chain_length
):
chain_set.add(snake_case__ )
chain_set_length += 1
A = digit_factorial_sum(snake_case__ )
if chain_element in chain_sets_lengths:
chain_set_length += chain_sets_lengths[chain_element]
A = chain_set_length
# If chain contains the exact amount of elements increase the counter
if chain_set_length == chain_length:
chains_counter += 1
return chains_counter
if __name__ == "__main__":
import doctest
doctest.testmod()
print(F"""{solution()}""") | 74 |
'''simple docstring'''
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, BertTokenizer, BlipImageProcessor, BlipProcessor, PreTrainedTokenizerFast
@require_vision
class A_ ( unittest.TestCase ):
def lowercase ( self : int ):
_UpperCAmelCase = tempfile.mkdtemp()
_UpperCAmelCase = BlipImageProcessor()
_UpperCAmelCase = BertTokenizer.from_pretrained("hf-internal-testing/tiny-random-BertModel" )
_UpperCAmelCase = BlipProcessor(snake_case_ , snake_case_ )
processor.save_pretrained(self.tmpdirname )
def lowercase ( self : Tuple , **snake_case_ : int ):
return AutoProcessor.from_pretrained(self.tmpdirname , **snake_case_ ).tokenizer
def lowercase ( self : Dict , **snake_case_ : Any ):
return AutoProcessor.from_pretrained(self.tmpdirname , **snake_case_ ).image_processor
def lowercase ( self : int ):
shutil.rmtree(self.tmpdirname )
def lowercase ( self : Optional[Any] ):
        _UpperCAmelCase = [np.random.randint(2_5_5 , size=(3, 3_0, 4_0_0) , dtype=np.uint8 )]
_UpperCAmelCase = [Image.fromarray(np.moveaxis(snake_case_ , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def lowercase ( self : int ):
_UpperCAmelCase = BlipProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
_UpperCAmelCase = self.get_tokenizer(bos_token="(BOS)" , eos_token="(EOS)" )
_UpperCAmelCase = self.get_image_processor(do_normalize=snake_case_ , padding_value=1.0 )
_UpperCAmelCase = BlipProcessor.from_pretrained(
self.tmpdirname , bos_token="(BOS)" , eos_token="(EOS)" , do_normalize=snake_case_ , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , snake_case_ )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , snake_case_ )
def lowercase ( self : Any ):
_UpperCAmelCase = self.get_image_processor()
_UpperCAmelCase = self.get_tokenizer()
_UpperCAmelCase = BlipProcessor(tokenizer=snake_case_ , image_processor=snake_case_ )
_UpperCAmelCase = self.prepare_image_inputs()
_UpperCAmelCase = image_processor(snake_case_ , return_tensors="np" )
_UpperCAmelCase = processor(images=snake_case_ , return_tensors="np" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
def lowercase ( self : Optional[int] ):
_UpperCAmelCase = self.get_image_processor()
_UpperCAmelCase = self.get_tokenizer()
_UpperCAmelCase = BlipProcessor(tokenizer=snake_case_ , image_processor=snake_case_ )
_UpperCAmelCase = "lower newer"
_UpperCAmelCase = processor(text=snake_case_ )
_UpperCAmelCase = tokenizer(snake_case_ , return_token_type_ids=snake_case_ )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def lowercase ( self : Optional[Any] ):
_UpperCAmelCase = self.get_image_processor()
_UpperCAmelCase = self.get_tokenizer()
_UpperCAmelCase = BlipProcessor(tokenizer=snake_case_ , image_processor=snake_case_ )
_UpperCAmelCase = "lower newer"
_UpperCAmelCase = self.prepare_image_inputs()
_UpperCAmelCase = processor(text=snake_case_ , images=snake_case_ )
self.assertListEqual(list(inputs.keys() ) , ["pixel_values", "input_ids", "attention_mask"] )
# test if it raises when no input is passed
with pytest.raises(snake_case_ ):
processor()
def lowercase ( self : Union[str, Any] ):
_UpperCAmelCase = self.get_image_processor()
_UpperCAmelCase = self.get_tokenizer()
_UpperCAmelCase = BlipProcessor(tokenizer=snake_case_ , image_processor=snake_case_ )
_UpperCAmelCase = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
_UpperCAmelCase = processor.batch_decode(snake_case_ )
_UpperCAmelCase = tokenizer.batch_decode(snake_case_ )
self.assertListEqual(snake_case_ , snake_case_ )
def lowercase ( self : str ):
_UpperCAmelCase = self.get_image_processor()
_UpperCAmelCase = self.get_tokenizer()
_UpperCAmelCase = BlipProcessor(tokenizer=snake_case_ , image_processor=snake_case_ )
_UpperCAmelCase = "lower newer"
_UpperCAmelCase = self.prepare_image_inputs()
_UpperCAmelCase = processor(text=snake_case_ , images=snake_case_ )
# For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask']
self.assertListEqual(list(inputs.keys() ) , ["pixel_values", "input_ids", "attention_mask"] )
| 22 | 0 |
'''simple docstring'''
from abc import ABC, abstractmethod
from typing import Optional, Union

from .. import Dataset, DatasetDict, Features, IterableDataset, IterableDatasetDict, NamedSplit
from ..utils.typing import NestedDataStructureLike, PathLike


class AbstractDatasetReader(ABC):
    def __init__(
        self,
        path_or_paths: Optional[NestedDataStructureLike[PathLike]] = None,
        split: Optional[NamedSplit] = None,
        features: Optional[Features] = None,
        cache_dir: Optional[str] = None,
        keep_in_memory: bool = False,
        streaming: bool = False,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        '''simple docstring'''
        self.path_or_paths = path_or_paths
        self.split = split if split or isinstance(path_or_paths, dict) else '''train'''
        self.features = features
        self.cache_dir = cache_dir
        self.keep_in_memory = keep_in_memory
        self.streaming = streaming
        self.num_proc = num_proc
        self.kwargs = kwargs

    @abstractmethod
    def read(self) -> Union[Dataset, DatasetDict, IterableDataset, IterableDatasetDict]:
        '''simple docstring'''
        pass


class AbstractDatasetInputStream(ABC):
    def __init__(
        self,
        features: Optional[Features] = None,
        cache_dir: Optional[str] = None,
        keep_in_memory: bool = False,
        streaming: bool = False,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        '''simple docstring'''
        self.features = features
        self.cache_dir = cache_dir
        self.keep_in_memory = keep_in_memory
        self.streaming = streaming
        self.num_proc = num_proc
        self.kwargs = kwargs

    @abstractmethod
    def read(self) -> Union[Dataset, IterableDataset]:
        '''simple docstring'''
        pass
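

# A minimal sketch of a concrete reader built on the abstract base above.
# "InMemoryJsonReader" and its behavior are illustrative assumptions, not part
# of the library; only Dataset.from_list is a known datasets API.
class InMemoryJsonReader(AbstractDatasetReader):
    def read(self):
        import json

        with open(self.path_or_paths, encoding="utf-8") as f:
            records = json.load(f)
        # Dataset.from_list builds an in-memory Dataset from a list of dicts.
        return Dataset.from_list(records)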
| 75 |
'''simple docstring'''
import inspect
from typing import List, Optional, Tuple, Union
import numpy as np
import PIL
import torch
import torch.utils.checkpoint
from ...models import UNetaDModel, VQModel
from ...schedulers import (
DDIMScheduler,
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
)
from ...utils import PIL_INTERPOLATION, randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
def preprocess(image):
    '''simple docstring'''
    w, h = image.size
    w, h = (x - x % 32 for x in (w, h))  # resize to integer multiple of 32
    image = image.resize((w, h), resample=PIL_INTERPOLATION["lanczos"])
    image = np.array(image).astype(np.float32) / 255.0
    image = image[None].transpose(0, 3, 1, 2)
    image = torch.from_numpy(image)
    return 2.0 * image - 1.0
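

# Example (a sketch, assuming Pillow is installed): `preprocess` maps an RGB PIL
# image to a (1, 3, H, W) float tensor in [-1.0, 1.0], with H and W rounded down
# to multiples of 32. For an all-black 70x33 input:
#
#     >>> out = preprocess(PIL.Image.new("RGB", (70, 33)))
#     >>> tuple(out.shape)
#     (1, 3, 32, 64)
#     >>> float(out.min()), float(out.max())
#     (-1.0, -1.0)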
class A_ ( lowerCAmelCase_ ):
def __init__( self : Optional[Any] , snake_case_ : VQModel , snake_case_ : UNetaDModel , snake_case_ : Union[
DDIMScheduler,
PNDMScheduler,
LMSDiscreteScheduler,
EulerDiscreteScheduler,
EulerAncestralDiscreteScheduler,
DPMSolverMultistepScheduler,
] , ):
super().__init__()
self.register_modules(vqvae=snake_case_ , unet=snake_case_ , scheduler=snake_case_ )
@torch.no_grad()
def __call__( self : Any , snake_case_ : Union[torch.Tensor, PIL.Image.Image] = None , snake_case_ : Optional[int] = 1 , snake_case_ : Optional[int] = 1_0_0 , snake_case_ : Optional[float] = 0.0 , snake_case_ : Optional[Union[torch.Generator, List[torch.Generator]]] = None , snake_case_ : Optional[str] = "pil" , snake_case_ : bool = True , ):
if isinstance(snake_case_ , PIL.Image.Image ):
_UpperCAmelCase = 1
elif isinstance(snake_case_ , torch.Tensor ):
_UpperCAmelCase = image.shape[0]
else:
raise ValueError(f'`image` has to be of type `PIL.Image.Image` or `torch.Tensor` but is {type(snake_case_ )}' )
if isinstance(snake_case_ , PIL.Image.Image ):
_UpperCAmelCase = preprocess(snake_case_ )
_UpperCAmelCase , _UpperCAmelCase = image.shape[-2:]
# in_channels should be 6: 3 for latents, 3 for low resolution image
_UpperCAmelCase = (batch_size, self.unet.config.in_channels // 2, height, width)
_UpperCAmelCase = next(self.unet.parameters() ).dtype
_UpperCAmelCase = randn_tensor(snake_case_ , generator=snake_case_ , device=self.device , dtype=snake_case_ )
_UpperCAmelCase = image.to(device=self.device , dtype=snake_case_ )
# set timesteps and move to the correct device
self.scheduler.set_timesteps(snake_case_ , device=self.device )
_UpperCAmelCase = self.scheduler.timesteps
# scale the initial noise by the standard deviation required by the scheduler
_UpperCAmelCase = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature.
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
_UpperCAmelCase = "eta" in set(inspect.signature(self.scheduler.step ).parameters.keys() )
_UpperCAmelCase = {}
if accepts_eta:
_UpperCAmelCase = eta
for t in self.progress_bar(snake_case_ ):
# concat latents and low resolution image in the channel dimension.
_UpperCAmelCase = torch.cat([latents, image] , dim=1 )
_UpperCAmelCase = self.scheduler.scale_model_input(snake_case_ , snake_case_ )
# predict the noise residual
_UpperCAmelCase = self.unet(snake_case_ , snake_case_ ).sample
# compute the previous noisy sample x_t -> x_t-1
_UpperCAmelCase = self.scheduler.step(snake_case_ , snake_case_ , snake_case_ , **snake_case_ ).prev_sample
# decode the image latents with the VQVAE
_UpperCAmelCase = self.vqvae.decode(snake_case_ ).sample
_UpperCAmelCase = torch.clamp(snake_case_ , -1.0 , 1.0 )
_UpperCAmelCase = image / 2 + 0.5
_UpperCAmelCase = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
_UpperCAmelCase = self.numpy_to_pil(snake_case_ )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=snake_case_ )
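

# The "accepts_eta" check above is an instance of a general pattern: filter a
# kwargs dict down to the parameters a callee actually declares. A standalone
# sketch of that pattern (the helper name is ours, not a diffusers API):
def filter_kwargs_for(fn, **kwargs):
    accepted = set(inspect.signature(fn).parameters.keys())
    return {k: v for k, v in kwargs.items() if k in accepted}


# e.g. filter_kwargs_for(self.scheduler.step, eta=0.0, generator=None) keeps only
# the keys that this particular scheduler's `step` signature declares.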
| 22 | 0 |
import warnings
from ...utils import logging
from .image_processing_deformable_detr import DeformableDetrImageProcessor
a_ = logging.get_logger(__name__)
class _UpperCamelCase ( __A ):
'''simple docstring'''
def __init__( self : int , *a : List[Any] , **a : Any ) -> None:
"""simple docstring"""
warnings.warn(
"The class DeformableDetrFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
" Please use DeformableDetrImageProcessor instead." , a , )
super().__init__(*a , **a ) | 76 |
'''simple docstring'''
import string
from math import log10


def term_frequency(term: str, document: str) -> int:
    '''simple docstring'''
    document_without_punctuation = document.translate(
        str.maketrans("", "", string.punctuation) ).replace("\n", "")
    tokenize_document = document_without_punctuation.split(" ")  # word tokenization
    return len([word for word in tokenize_document if word.lower() == term.lower()])


def document_frequency(term: str, corpus: str) -> tuple[int, int]:
    '''simple docstring'''
    corpus_without_punctuation = corpus.lower().translate(
        str.maketrans("", "", string.punctuation) )  # strip all punctuation and replace it with ''
    docs = corpus_without_punctuation.split("\n")
    term = term.lower()
    return (len([doc for doc in docs if term in doc]), len(docs))


def inverse_document_frequency(df: int, n: int, smoothing: bool = False) -> float:
    '''simple docstring'''
    if smoothing:
        if n == 0:
            raise ValueError("log10(0) is undefined.")
        return round(1 + log10(n / (1 + df)), 3)
    if df == 0:
        raise ZeroDivisionError("df must be > 0")
    elif n == 0:
        raise ValueError("log10(0) is undefined.")
    return round(log10(n / df), 3)


def tf_idf(tf: int, idf: int) -> float:
    '''simple docstring'''
    return round(tf * idf, 3)
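

if __name__ == "__main__":
    # A small end-to-end sketch of the four helpers above on a toy corpus
    # (the corpus and numbers here are illustrative, not from a real dataset).
    corpus = "this is a sample\nthis is another example"
    tf = term_frequency("this", "this is a sample")  # -> 1
    df, n = document_frequency("this", corpus)       # -> (2, 2)
    idf = inverse_document_frequency(df, n)          # -> log10(2 / 2) = 0.0
    print(tf_idf(tf, idf))                           # -> 0.0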
| 22 | 0 |
"""simple docstring"""
from manim import *
class UpperCAmelCase_ ( _a):
def _UpperCAmelCase ( self ) -> str:
lowercase__ : int = Rectangle(height=0.5 , width=0.5 )
lowercase__ : List[Any] = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
lowercase__ : Optional[Any] = Rectangle(height=0.25 , width=0.25 )
lowercase__ : Optional[int] = [mem.copy() for i in range(6 )]
lowercase__ : Tuple = [mem.copy() for i in range(6 )]
lowercase__ : str = VGroup(*a ).arrange(a , buff=0 )
lowercase__ : str = VGroup(*a ).arrange(a , buff=0 )
lowercase__ : str = VGroup(a , a ).arrange(a , buff=0 )
lowercase__ : Dict = Text('CPU' , font_size=2_4 )
lowercase__ : Dict = Group(a , a ).arrange(a , buff=0.5 , aligned_edge=a )
cpu.move_to([-2.5, -0.5, 0] )
self.add(a )
lowercase__ : int = [mem.copy() for i in range(4 )]
lowercase__ : List[Any] = VGroup(*a ).arrange(a , buff=0 )
lowercase__ : List[Any] = Text('GPU' , font_size=2_4 )
lowercase__ : Union[str, Any] = Group(a , a ).arrange(a , buff=0.5 , aligned_edge=a )
gpu.move_to([-1, -1, 0] )
self.add(a )
lowercase__ : Tuple = [mem.copy() for i in range(6 )]
lowercase__ : List[Any] = VGroup(*a ).arrange(a , buff=0 )
lowercase__ : Union[str, Any] = Text('Model' , font_size=2_4 )
lowercase__ : Any = Group(a , a ).arrange(a , buff=0.5 , aligned_edge=a )
model.move_to([3, -1.0, 0] )
self.add(a )
lowercase__ : int = []
lowercase__ : Tuple = []
for i, rect in enumerate(a ):
lowercase__ : List[Any] = fill.copy().set_fill(a , opacity=0.8 )
target.move_to(a )
model_arr.append(a )
lowercase__ : List[Any] = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0.0 ).set_fill(a , opacity=0.8 )
cpu_target.move_to(cpu_left_col_base[i] )
model_cpu_arr.append(a )
self.add(*a , *a )
lowercase__ : Tuple = [meta_mem.copy() for i in range(6 )]
lowercase__ : List[Any] = [meta_mem.copy() for i in range(6 )]
lowercase__ : Any = VGroup(*a ).arrange(a , buff=0 )
lowercase__ : Any = VGroup(*a ).arrange(a , buff=0 )
lowercase__ : Any = VGroup(a , a ).arrange(a , buff=0 )
lowercase__ : Tuple = Text('Disk' , font_size=2_4 )
lowercase__ : List[str] = Group(a , a ).arrange(a , buff=0.5 , aligned_edge=a )
disk.move_to([-4, -1.25, 0] )
self.add(a , a )
lowercase__ : Union[str, Any] = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
lowercase__ : Optional[Any] = MarkupText(
f"""<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model""" , font_size=1_8 , )
key_text.move_to([-5, 2.4, 0] )
self.add(a , a )
lowercase__ : Optional[int] = MarkupText(
f"""<span fgcolor='{BLUE}'>●</span> Checkpoint""" , font_size=1_8 , )
blue_text.next_to(a , DOWN * 2.4 , aligned_edge=key_text.get_left() )
self.add(a )
lowercase__ : str = MarkupText(
f"""Now watch as an input is passed through the model\nand how the memory is utilized and handled.""" , font_size=2_4 , )
step_a.move_to([2, 2, 0] )
self.play(Write(a ) )
lowercase__ : List[Any] = Square(0.3 )
input.set_fill(a , opacity=1.0 )
input.set_stroke(width=0.0 )
input.next_to(model_base[0] , a , buff=0.5 )
self.play(Write(a ) )
input.generate_target()
input.target.next_to(model_arr[0] , direction=a , buff=0.02 )
self.play(MoveToTarget(a ) )
self.play(FadeOut(a ) )
lowercase__ : Any = Arrow(start=a , end=a , color=a , buff=0.5 )
a.next_to(model_arr[0].get_left() , a , buff=0.2 )
model_cpu_arr[0].generate_target()
model_cpu_arr[0].target.move_to(gpu_rect[0] )
lowercase__ : List[str] = MarkupText(
f"""As the input reaches a layer, the hook triggers\nand weights are moved from the CPU\nto the GPU and back.""" , font_size=2_4 , )
step_a.move_to([2, 2, 0] )
self.play(Write(a , run_time=3 ) )
lowercase__ : Optional[Any] = {'run_time': 1, 'fade_in': True, 'fade_out': True, 'buff': 0.02}
self.play(
Write(a ) , Circumscribe(model_arr[0] , color=a , **a ) , Circumscribe(model_cpu_arr[0] , color=a , **a ) , Circumscribe(gpu_rect[0] , color=a , **a ) , )
self.play(MoveToTarget(model_cpu_arr[0] ) )
lowercase__ : Tuple = a.copy()
for i in range(6 ):
a_c.next_to(model_arr[i].get_right() + 0.02 , a , buff=0.2 )
input.generate_target()
input.target.move_to(model_arr[i].get_right() + 0.02 )
lowercase__ : Union[str, Any] = AnimationGroup(
FadeOut(a , run_time=0.5 ) , MoveToTarget(a , run_time=0.5 ) , FadeIn(a , run_time=0.5 ) , lag_ratio=0.2 )
self.play(a )
model_cpu_arr[i].generate_target()
model_cpu_arr[i].target.move_to(cpu_left_col_base[i] )
if i < 5:
model_cpu_arr[i + 1].generate_target()
model_cpu_arr[i + 1].target.move_to(gpu_rect[0] )
if i >= 1:
lowercase__ : List[str] = 0.7
self.play(
Circumscribe(model_arr[i] , **a ) , Circumscribe(cpu_left_col_base[i] , **a ) , Circumscribe(cpu_left_col_base[i + 1] , color=a , **a ) , Circumscribe(gpu_rect[0] , color=a , **a ) , Circumscribe(model_arr[i + 1] , color=a , **a ) , )
if i < 1:
self.play(
MoveToTarget(model_cpu_arr[i] ) , MoveToTarget(model_cpu_arr[i + 1] ) , )
else:
self.play(
MoveToTarget(model_cpu_arr[i] , run_time=0.7 ) , MoveToTarget(model_cpu_arr[i + 1] , run_time=0.7 ) , )
else:
model_cpu_arr[i].generate_target()
model_cpu_arr[i].target.move_to(cpu_left_col_base[-1] )
input.generate_target()
input.target.next_to(model_arr[-1].get_right() , RIGHT + 0.02 , buff=0.2 )
self.play(
Circumscribe(model_arr[-1] , color=a , **a ) , Circumscribe(cpu_left_col_base[-1] , color=a , **a ) , Circumscribe(gpu_rect[0] , color=a , **a ) , )
self.play(MoveToTarget(model_cpu_arr[i] ) )
lowercase__ : Optional[Any] = a_c
lowercase__ : Optional[int] = a_c.copy()
input.generate_target()
input.target.next_to(model_base[-1] , RIGHT + 0.02 , buff=0.5 )
self.play(
FadeOut(a ) , FadeOut(a , run_time=0.5 ) , )
lowercase__ : Optional[Any] = MarkupText(f"""Inference on a model too large for GPU memory\nis successfully completed.""" , font_size=2_4 )
step_a.move_to([2, 2, 0] )
self.play(Write(a , run_time=3 ) , MoveToTarget(a ) )
self.wait()
| 77 |
'''simple docstring'''
from ..utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_scipy_available,
is_torch_available,
is_torchsde_available,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_pt_objects import * # noqa F403
else:
from .scheduling_consistency_models import CMStochasticIterativeScheduler
from .scheduling_ddim import DDIMScheduler
from .scheduling_ddim_inverse import DDIMInverseScheduler
from .scheduling_ddim_parallel import DDIMParallelScheduler
from .scheduling_ddpm import DDPMScheduler
from .scheduling_ddpm_parallel import DDPMParallelScheduler
from .scheduling_deis_multistep import DEISMultistepScheduler
from .scheduling_dpmsolver_multistep import DPMSolverMultistepScheduler
from .scheduling_dpmsolver_multistep_inverse import DPMSolverMultistepInverseScheduler
from .scheduling_dpmsolver_singlestep import DPMSolverSinglestepScheduler
from .scheduling_euler_ancestral_discrete import EulerAncestralDiscreteScheduler
from .scheduling_euler_discrete import EulerDiscreteScheduler
from .scheduling_heun_discrete import HeunDiscreteScheduler
from .scheduling_ipndm import IPNDMScheduler
    from .scheduling_k_dpm_2_ancestral_discrete import KDPM2AncestralDiscreteScheduler
    from .scheduling_k_dpm_2_discrete import KDPM2DiscreteScheduler
from .scheduling_karras_ve import KarrasVeScheduler
from .scheduling_pndm import PNDMScheduler
from .scheduling_repaint import RePaintScheduler
from .scheduling_sde_ve import ScoreSdeVeScheduler
from .scheduling_sde_vp import ScoreSdeVpScheduler
from .scheduling_unclip import UnCLIPScheduler
from .scheduling_unipc_multistep import UniPCMultistepScheduler
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin
from .scheduling_vq_diffusion import VQDiffusionScheduler
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_flax_objects import * # noqa F403
else:
from .scheduling_ddim_flax import FlaxDDIMScheduler
from .scheduling_ddpm_flax import FlaxDDPMScheduler
from .scheduling_dpmsolver_multistep_flax import FlaxDPMSolverMultistepScheduler
from .scheduling_karras_ve_flax import FlaxKarrasVeScheduler
from .scheduling_lms_discrete_flax import FlaxLMSDiscreteScheduler
from .scheduling_pndm_flax import FlaxPNDMScheduler
from .scheduling_sde_ve_flax import FlaxScoreSdeVeScheduler
from .scheduling_utils_flax import (
FlaxKarrasDiffusionSchedulers,
FlaxSchedulerMixin,
FlaxSchedulerOutput,
broadcast_to_shape_from_left,
)
try:
if not (is_torch_available() and is_scipy_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_torch_and_scipy_objects import * # noqa F403
else:
from .scheduling_lms_discrete import LMSDiscreteScheduler
try:
if not (is_torch_available() and is_torchsde_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_torch_and_torchsde_objects import * # noqa F403
else:
from .scheduling_dpmsolver_sde import DPMSolverSDEScheduler
| 22 | 0 |
"""simple docstring"""
def _lowerCAmelCase ( lowercase_ , lowercase_ ):
if b == 0:
return 1
if (b % 2) == 0:
return actual_power(lowercase_ , int(b / 2 ) ) * actual_power(lowercase_ , int(b / 2 ) )
else:
return a * actual_power(lowercase_ , int(b / 2 ) ) * actual_power(lowercase_ , int(b / 2 ) )
def _lowerCAmelCase ( lowercase_ , lowercase_ ):
if b < 0:
return 1 / actual_power(lowercase_ , lowercase_ )
return actual_power(lowercase_ , lowercase_ )
if __name__ == "__main__":
print(power(-2, -3))
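    # Worked example (computed by hand): power(-2, -3) == 1 / (-2)**3 == -0.125.
    # Note the recursion recomputes actual_power(a, b // 2) twice per call, so this
    # sketch performs O(b) multiplications rather than the O(log b) of true fast
    # exponentiation; caching the half-power in a local would restore O(log b).
    assert power(-2, -3) == -0.125
    assert power(2, 10) == 1024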
| 78 |
'''simple docstring'''
def get_set_bits_count(number: int) -> int:
    '''simple docstring'''
    if not isinstance(number, int) or number < 0:
        raise ValueError("Input must be a non-negative integer")
    count = 0
    while number:
        # This way we arrive at next set bit (next 1) instead of looping
        # through each bit and checking for 1s hence the
        # loop won't run 32 times it will only run the number of `1` times
        number &= number - 1
        count += 1
    return count
if __name__ == "__main__":
import doctest
doctest.testmod()
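# Worked example of Kernighan's trick used above: for 0b1011 (= 11),
# `number &= number - 1` clears the lowest set bit on each pass:
# 0b1011 -> 0b1010 -> 0b1000 -> 0b0000, i.e. exactly three iterations
# for three set bits, so get_set_bits_count(0b1011) == 3.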
| 22 | 0 |
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
lowerCamelCase_ = logging.get_logger(__name__)
class _UpperCAmelCase ( snake_case_ ):
"""simple docstring"""
snake_case = ['''pixel_values''']
def __init__( self : Dict , __UpperCAmelCase : bool = True , __UpperCAmelCase : Dict[str, int] = None , __UpperCAmelCase : float = None , __UpperCAmelCase : PILImageResampling = PILImageResampling.BILINEAR , __UpperCAmelCase : bool = True , __UpperCAmelCase : Union[int, float] = 1 / 255 , __UpperCAmelCase : bool = True , __UpperCAmelCase : Optional[Union[float, List[float]]] = None , __UpperCAmelCase : Optional[Union[float, List[float]]] = None , **__UpperCAmelCase : Any , ):
'''simple docstring'''
super().__init__(**__UpperCAmelCase )
_A = size if size is not None else {"shortest_edge": 384}
_A = get_size_dict(__UpperCAmelCase , default_to_square=__UpperCAmelCase )
_A = do_resize
_A = size
# Default value set here for backwards compatibility where the value in config is None
_A = crop_pct if crop_pct is not None else 224 / 256
_A = resample
_A = do_rescale
_A = rescale_factor
_A = do_normalize
_A = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
_A = image_std if image_std is not None else IMAGENET_STANDARD_STD
def lowerCAmelCase ( self : Optional[int] , __UpperCAmelCase : np.ndarray , __UpperCAmelCase : Dict[str, int] , __UpperCAmelCase : float , __UpperCAmelCase : PILImageResampling = PILImageResampling.BICUBIC , __UpperCAmelCase : Optional[Union[str, ChannelDimension]] = None , **__UpperCAmelCase : Optional[Any] , ):
'''simple docstring'''
_A = get_size_dict(__UpperCAmelCase , default_to_square=__UpperCAmelCase )
if "shortest_edge" not in size:
raise ValueError(f'''Size dictionary must contain \'shortest_edge\' key. Got {size.keys()}''' )
_A = size["shortest_edge"]
if shortest_edge < 384:
# maintain same ratio, resizing shortest edge to shortest_edge/crop_pct
_A = int(shortest_edge / crop_pct )
_A = get_resize_output_image_size(__UpperCAmelCase , size=__UpperCAmelCase , default_to_square=__UpperCAmelCase )
_A = resize(image=__UpperCAmelCase , size=__UpperCAmelCase , resample=__UpperCAmelCase , data_format=__UpperCAmelCase , **__UpperCAmelCase )
# then crop to (shortest_edge, shortest_edge)
return center_crop(image=__UpperCAmelCase , size=(shortest_edge, shortest_edge) , data_format=__UpperCAmelCase , **__UpperCAmelCase )
else:
# warping (no cropping) when evaluated at 384 or larger
return resize(
__UpperCAmelCase , size=(shortest_edge, shortest_edge) , resample=__UpperCAmelCase , data_format=__UpperCAmelCase , **__UpperCAmelCase )
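    # Worked example of the crop_pct branch above (numbers only): with
    # size={"shortest_edge": 224} and the default crop_pct = 224 / 256 = 0.875,
    # the shorter edge is first resized to int(224 / 0.875) = 256 and a 224x224
    # center crop is then taken -- the standard "resize 256, crop 224" evaluation
    # recipe. At shortest_edge >= 384 the image is instead warped directly to
    # (shortest_edge, shortest_edge) with no crop.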
def lowerCAmelCase ( self : str , __UpperCAmelCase : np.ndarray , __UpperCAmelCase : Union[int, float] , __UpperCAmelCase : Optional[Union[str, ChannelDimension]] = None , **__UpperCAmelCase : Union[str, Any] , ):
'''simple docstring'''
return rescale(__UpperCAmelCase , scale=__UpperCAmelCase , data_format=__UpperCAmelCase , **__UpperCAmelCase )
def lowerCAmelCase ( self : int , __UpperCAmelCase : np.ndarray , __UpperCAmelCase : Union[float, List[float]] , __UpperCAmelCase : Union[float, List[float]] , __UpperCAmelCase : Optional[Union[str, ChannelDimension]] = None , **__UpperCAmelCase : Union[str, Any] , ):
'''simple docstring'''
return normalize(__UpperCAmelCase , mean=__UpperCAmelCase , std=__UpperCAmelCase , data_format=__UpperCAmelCase , **__UpperCAmelCase )
def lowerCAmelCase ( self : List[str] , __UpperCAmelCase : ImageInput , __UpperCAmelCase : bool = None , __UpperCAmelCase : Dict[str, int] = None , __UpperCAmelCase : float = None , __UpperCAmelCase : PILImageResampling = None , __UpperCAmelCase : bool = None , __UpperCAmelCase : float = None , __UpperCAmelCase : bool = None , __UpperCAmelCase : Optional[Union[float, List[float]]] = None , __UpperCAmelCase : Optional[Union[float, List[float]]] = None , __UpperCAmelCase : Optional[Union[str, TensorType]] = None , __UpperCAmelCase : ChannelDimension = ChannelDimension.FIRST , **__UpperCAmelCase : List[Any] , ):
'''simple docstring'''
_A = do_resize if do_resize is not None else self.do_resize
_A = crop_pct if crop_pct is not None else self.crop_pct
_A = resample if resample is not None else self.resample
_A = do_rescale if do_rescale is not None else self.do_rescale
_A = rescale_factor if rescale_factor is not None else self.rescale_factor
_A = do_normalize if do_normalize is not None else self.do_normalize
_A = image_mean if image_mean is not None else self.image_mean
_A = image_std if image_std is not None else self.image_std
_A = size if size is not None else self.size
_A = get_size_dict(__UpperCAmelCase , default_to_square=__UpperCAmelCase )
_A = make_list_of_images(__UpperCAmelCase )
if not valid_images(__UpperCAmelCase ):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray." )
        if do_resize and (size is None or resample is None):
raise ValueError("Size and resample must be specified if do_resize is True." )
if do_resize and size["shortest_edge"] < 384 and crop_pct is None:
raise ValueError("crop_pct must be specified if size < 384." )
if do_rescale and rescale_factor is None:
raise ValueError("Rescale factor must be specified if do_rescale is True." )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("Image mean and std must be specified if do_normalize is True." )
# All transformations expect numpy arrays.
_A = [to_numpy_array(__UpperCAmelCase ) for image in images]
if do_resize:
_A = [self.resize(image=__UpperCAmelCase , size=__UpperCAmelCase , crop_pct=__UpperCAmelCase , resample=__UpperCAmelCase ) for image in images]
if do_rescale:
_A = [self.rescale(image=__UpperCAmelCase , scale=__UpperCAmelCase ) for image in images]
if do_normalize:
_A = [self.normalize(image=__UpperCAmelCase , mean=__UpperCAmelCase , std=__UpperCAmelCase ) for image in images]
_A = [to_channel_dimension_format(__UpperCAmelCase , __UpperCAmelCase ) for image in images]
_A = {"pixel_values": images}
return BatchFeature(data=__UpperCAmelCase , tensor_type=__UpperCAmelCase )
| 79 |
'''simple docstring'''
from __future__ import annotations

from collections.abc import Callable
from typing import Any, Generic, TypeVar

T = TypeVar('''T''')


class SegmentTree(Generic[T]):
    def __init__(self, arr: list[T], fnc: Callable[[T, T], T]) -> None:
        any_type: Any | T = None
        self.N: int = len(arr)
        # 1-indexed implicit tree: internal nodes live in st[1..N-1], leaves in st[N..2N-1]
        self.st: list[T] = [any_type for _ in range(self.N)] + arr
        self.fn = fnc
        self.build()

    def build(self) -> None:
        for p in range(self.N - 1, 0, -1):
            self.st[p] = self.fn(self.st[p * 2], self.st[p * 2 + 1])

    def update(self, p: int, v: T) -> None:
        p += self.N
        self.st[p] = v
        while p > 1:
            p = p // 2
            self.st[p] = self.fn(self.st[p * 2], self.st[p * 2 + 1])

    def query(self, l: int, r: int) -> T | None:  # noqa: E741
        l, r = l + self.N, r + self.N
        res: T | None = None
        while l <= r:
            if l % 2 == 1:
                res = self.st[l] if res is None else self.fn(res, self.st[l])
            if r % 2 == 0:
                res = self.st[r] if res is None else self.fn(res, self.st[r])
            l, r = (l + 1) // 2, (r - 1) // 2
        return res


if __name__ == "__main__":
    from functools import reduce

    test_array = [1, 10, -2, 9, -3, 8, 4, -7, 5, 6, 11, -12]

    test_updates = {
        0: 7,
        1: 2,
        2: 6,
        3: -14,
        4: 5,
        5: 4,
        6: 7,
        7: -10,
        8: 9,
        9: 10,
        10: 12,
        11: 1,
    }

    min_segment_tree = SegmentTree(test_array, min)
    max_segment_tree = SegmentTree(test_array, max)
    sum_segment_tree = SegmentTree(test_array, lambda a, b: a + b)

    def test_all_segments() -> None:
        '''simple docstring'''
        for i in range(len(test_array)):
            for j in range(i, len(test_array)):
                min_range = reduce(min, test_array[i : j + 1])
                max_range = reduce(max, test_array[i : j + 1])
                sum_range = reduce(lambda a, b: a + b, test_array[i : j + 1])
                assert min_range == min_segment_tree.query(i, j)
                assert max_range == max_segment_tree.query(i, j)
                assert sum_range == sum_segment_tree.query(i, j)

    test_all_segments()

    for index, value in test_updates.items():
        test_array[index] = value
        min_segment_tree.update(index, value)
        max_segment_tree.update(index, value)
        sum_segment_tree.update(index, value)
    test_all_segments()
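
    # Because the tree is generic over the combining function, any associative
    # operation works; a quick extra check with gcd (a sketch):
    from math import gcd

    gcd_segment_tree = SegmentTree([12, 18, 24, 8], gcd)
    assert gcd_segment_tree.query(0, 2) == 6  # gcd(12, 18, 24)
    assert gcd_segment_tree.query(2, 3) == 8  # gcd(24, 8)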
| 22 | 0 |
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
a__ : List[str] = logging.get_logger(__name__)
if is_vision_available():
import PIL
class lowercase_ ( a__ ):
__UpperCAmelCase = ['pixel_values']
def __init__( self , a = True , a = None , a = PILImageResampling.BICUBIC , a = True , a = None , a = True , a = 1 / 2_55 , a = True , a = None , a = None , a = True , **a , ):
super().__init__(**a )
UpperCamelCase__ = size if size is not None else {"shortest_edge": 2_24}
UpperCamelCase__ = get_size_dict(a , default_to_square=a )
UpperCamelCase__ = crop_size if crop_size is not None else {"height": 2_24, "width": 2_24}
UpperCamelCase__ = get_size_dict(a , default_to_square=a , param_name="crop_size" )
UpperCamelCase__ = do_resize
UpperCamelCase__ = size
UpperCamelCase__ = resample
UpperCamelCase__ = do_center_crop
UpperCamelCase__ = crop_size
UpperCamelCase__ = do_rescale
UpperCamelCase__ = rescale_factor
UpperCamelCase__ = do_normalize
UpperCamelCase__ = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
UpperCamelCase__ = image_std if image_std is not None else OPENAI_CLIP_STD
UpperCamelCase__ = do_convert_rgb
def __a ( self , a , a , a = PILImageResampling.BICUBIC , a = None , **a , ):
UpperCamelCase__ = get_size_dict(a , default_to_square=a )
if "shortest_edge" not in size:
raise ValueError(f'''The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}''' )
UpperCamelCase__ = get_resize_output_image_size(a , size=size["shortest_edge"] , default_to_square=a )
return resize(a , size=a , resample=a , data_format=a , **a )
def __a ( self , a , a , a = None , **a , ):
UpperCamelCase__ = get_size_dict(a )
if "height" not in size or "width" not in size:
raise ValueError(f'''The `size` parameter must contain the keys (height, width). Got {size.keys()}''' )
return center_crop(a , size=(size["height"], size["width"]) , data_format=a , **a )
def __a ( self , a , a , a = None , **a , ):
return rescale(a , scale=a , data_format=a , **a )
def __a ( self , a , a , a , a = None , **a , ):
return normalize(a , mean=a , std=a , data_format=a , **a )
def __a ( self , a , a = None , a = None , a = None , a = None , a = None , a = None , a = None , a = None , a = None , a = None , a = None , a = None , a = ChannelDimension.FIRST , **a , ):
UpperCamelCase__ = do_resize if do_resize is not None else self.do_resize
UpperCamelCase__ = size if size is not None else self.size
UpperCamelCase__ = get_size_dict(a , param_name="size" , default_to_square=a )
UpperCamelCase__ = resample if resample is not None else self.resample
UpperCamelCase__ = do_center_crop if do_center_crop is not None else self.do_center_crop
UpperCamelCase__ = crop_size if crop_size is not None else self.crop_size
UpperCamelCase__ = get_size_dict(a , param_name="crop_size" , default_to_square=a )
UpperCamelCase__ = do_rescale if do_rescale is not None else self.do_rescale
UpperCamelCase__ = rescale_factor if rescale_factor is not None else self.rescale_factor
UpperCamelCase__ = do_normalize if do_normalize is not None else self.do_normalize
UpperCamelCase__ = image_mean if image_mean is not None else self.image_mean
UpperCamelCase__ = image_std if image_std is not None else self.image_std
UpperCamelCase__ = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
UpperCamelCase__ = make_list_of_images(a )
if not valid_images(a ):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray." )
if do_resize and size is None:
raise ValueError("Size must be specified if do_resize is True." )
if do_center_crop and crop_size is None:
raise ValueError("Crop size must be specified if do_center_crop is True." )
if do_rescale and rescale_factor is None:
raise ValueError("Rescale factor must be specified if do_rescale is True." )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("Image mean and std must be specified if do_normalize is True." )
# PIL RGBA images are converted to RGB
if do_convert_rgb:
UpperCamelCase__ = [convert_to_rgb(a ) for image in images]
# All transformations expect numpy arrays.
UpperCamelCase__ = [to_numpy_array(a ) for image in images]
if do_resize:
UpperCamelCase__ = [self.resize(image=a , size=a , resample=a ) for image in images]
if do_center_crop:
UpperCamelCase__ = [self.center_crop(image=a , size=a ) for image in images]
if do_rescale:
UpperCamelCase__ = [self.rescale(image=a , scale=a ) for image in images]
if do_normalize:
UpperCamelCase__ = [self.normalize(image=a , mean=a , std=a ) for image in images]
UpperCamelCase__ = [to_channel_dimension_format(a , a ) for image in images]
UpperCamelCase__ = {"pixel_values": images}
return BatchFeature(data=a , tensor_type=a )
| 80 |
'''simple docstring'''
import pytest
from datasets.utils.sharding import _distribute_shards, _number_of_shards_in_gen_kwargs, _split_gen_kwargs
@pytest.mark.parametrize(
"kwargs, expected" , [
({"num_shards": 0, "max_num_jobs": 1}, []),
({"num_shards": 10, "max_num_jobs": 1}, [range(10 )]),
({"num_shards": 10, "max_num_jobs": 10}, [range(__lowercase , i + 1 ) for i in range(10 )]),
({"num_shards": 1, "max_num_jobs": 10}, [range(1 )]),
({"num_shards": 10, "max_num_jobs": 3}, [range(0 , 4 ), range(4 , 7 ), range(7 , 10 )]),
({"num_shards": 3, "max_num_jobs": 10}, [range(0 , 1 ), range(1 , 2 ), range(2 , 3 )]),
] , )
def UpperCAmelCase_ ( __lowercase : int , __lowercase : Dict ) -> List[Any]:
'''simple docstring'''
_UpperCAmelCase = _distribute_shards(**__lowercase )
assert out == expected
@pytest.mark.parametrize(
"gen_kwargs, max_num_jobs, expected" , [
({"foo": 0}, 10, [{"foo": 0}]),
({"shards": [0, 1, 2, 3]}, 1, [{"shards": [0, 1, 2, 3]}]),
({"shards": [0, 1, 2, 3]}, 4, [{"shards": [0]}, {"shards": [1]}, {"shards": [2]}, {"shards": [3]}]),
({"shards": [0, 1]}, 4, [{"shards": [0]}, {"shards": [1]}]),
({"shards": [0, 1, 2, 3]}, 2, [{"shards": [0, 1]}, {"shards": [2, 3]}]),
] , )
def UpperCAmelCase_ ( __lowercase : Dict , __lowercase : Optional[Any] , __lowercase : int ) -> str:
'''simple docstring'''
_UpperCAmelCase = _split_gen_kwargs(__lowercase , __lowercase )
assert out == expected
@pytest.mark.parametrize(
"gen_kwargs, expected" , [
({"foo": 0}, 1),
({"shards": [0]}, 1),
({"shards": [0, 1, 2, 3]}, 4),
({"shards": [0, 1, 2, 3], "foo": 0}, 4),
({"shards": [0, 1, 2, 3], "other": (0, 1)}, 4),
({"shards": [0, 1, 2, 3], "shards2": [0, 1]}, RuntimeError),
] , )
def UpperCAmelCase_ ( __lowercase : Optional[Any] , __lowercase : List[Any] ) -> List[Any]:
'''simple docstring'''
if expected is RuntimeError:
with pytest.raises(__lowercase ):
_number_of_shards_in_gen_kwargs(__lowercase )
else:
_UpperCAmelCase = _number_of_shards_in_gen_kwargs(__lowercase )
assert out == expected
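

def _reference_distribute_shards(num_shards: int, max_num_jobs: int) -> list:
    # A reference sketch matching the expected outputs in the first test above
    # (our own re-implementation for illustration, not the library's code):
    # split range(num_shards) into at most max_num_jobs contiguous chunks,
    # giving the first num_shards % num_jobs chunks one extra shard.
    num_jobs = min(num_shards, max_num_jobs)
    shards_per_job, extra = divmod(num_shards, num_jobs) if num_jobs else (0, 0)
    out, start = [], 0
    for i in range(num_jobs):
        end = start + shards_per_job + (1 if i < extra else 0)
        out.append(range(start, end))
        start = end
    return out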
| 22 | 0 |
"""simple docstring"""
def _A ( lowercase , lowercase ):
"""simple docstring"""
if b == 0:
return 1
if (b % 2) == 0:
return actual_power(lowercase , int(b / 2 ) ) * actual_power(lowercase , int(b / 2 ) )
else:
return a * actual_power(lowercase , int(b / 2 ) ) * actual_power(lowercase , int(b / 2 ) )
def _A ( lowercase , lowercase ):
"""simple docstring"""
if b < 0:
return 1 / actual_power(lowercase , lowercase )
return actual_power(lowercase , lowercase )
if __name__ == "__main__":
print(power(-2, -3)) | 81 |
'''simple docstring'''
import math


def perfect_square(num: int) -> bool:
    '''simple docstring'''
    return math.sqrt(num) * math.sqrt(num) == num


def perfect_square_binary_search(n: int) -> bool:
    '''simple docstring'''
    left = 0
    right = n
    while left <= right:
        mid = (left + right) // 2
        if mid**2 == n:
            return True
        elif mid**2 > n:
            right = mid - 1
        else:
            left = mid + 1
    return False


if __name__ == "__main__":
    import doctest

    doctest.testmod()
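

# Float sqrt loses precision for very large inputs (above ~2**53), so an exact
# integer check is safer; a sketch using math.isqrt (Python 3.8+):
def perfect_square_isqrt(num: int) -> bool:
    return num >= 0 and math.isqrt(num) ** 2 == num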
| 22 | 0 |
import coval # From: git+https://github.com/ns-moosavi/coval.git # noqa: F401
from coval.conll import reader, util
from coval.eval import evaluator
import datasets
A__ = datasets.logging.get_logger(__name__)
A__ = """\
@InProceedings{moosavi2019minimum,
author = { Nafise Sadat Moosavi, Leo Born, Massimo Poesio and Michael Strube},
title = {Using Automatically Extracted Minimum Spans to Disentangle Coreference Evaluation from Boundary Detection},
year = {2019},
booktitle = {Proceedings of the 57th Annual Meeting of
the Association for Computational Linguistics (Volume 1: Long Papers)},
publisher = {Association for Computational Linguistics},
address = {Florence, Italy},
}
@inproceedings{10.3115/1072399.1072405,
author = {Vilain, Marc and Burger, John and Aberdeen, John and Connolly, Dennis and Hirschman, Lynette},
title = {A Model-Theoretic Coreference Scoring Scheme},
year = {1995},
isbn = {1558604022},
publisher = {Association for Computational Linguistics},
address = {USA},
url = {https://doi.org/10.3115/1072399.1072405},
doi = {10.3115/1072399.1072405},
booktitle = {Proceedings of the 6th Conference on Message Understanding},
pages = {45–52},
numpages = {8},
location = {Columbia, Maryland},
series = {MUC6 ’95}
}
@INPROCEEDINGS{Bagga98algorithmsfor,
author = {Amit Bagga and Breck Baldwin},
title = {Algorithms for Scoring Coreference Chains},
booktitle = {In The First International Conference on Language Resources and Evaluation Workshop on Linguistics Coreference},
year = {1998},
pages = {563--566}
}
@INPROCEEDINGS{Luo05oncoreference,
author = {Xiaoqiang Luo},
title = {On coreference resolution performance metrics},
booktitle = {In Proc. of HLT/EMNLP},
year = {2005},
pages = {25--32},
publisher = {URL}
}
@inproceedings{moosavi-strube-2016-coreference,
title = \"Which Coreference Evaluation Metric Do You Trust? A Proposal for a Link-based Entity Aware Metric\",
author = \"Moosavi, Nafise Sadat and
Strube, Michael\",
booktitle = \"Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)\",
month = aug,
year = \"2016\",
address = \"Berlin, Germany\",
publisher = \"Association for Computational Linguistics\",
url = \"https://www.aclweb.org/anthology/P16-1060\",
doi = \"10.18653/v1/P16-1060\",
pages = \"632--642\",
}
"""
A__ = """\
CoVal is a coreference evaluation tool for the CoNLL and ARRAU datasets which
implements of the common evaluation metrics including MUC [Vilain et al, 1995],
B-cubed [Bagga and Baldwin, 1998], CEAFe [Luo et al., 2005],
LEA [Moosavi and Strube, 2016] and the averaged CoNLL score
(the average of the F1 values of MUC, B-cubed and CEAFe)
[Denis and Baldridge, 2009a; Pradhan et al., 2011].
This wrapper of CoVal currently only work with CoNLL line format:
The CoNLL format has one word per line with all the annotation for this word in column separated by spaces:
Column Type Description
1 Document ID This is a variation on the document filename
2 Part number Some files are divided into multiple parts numbered as 000, 001, 002, ... etc.
3 Word number
4 Word itself This is the token as segmented/tokenized in the Treebank. Initially the *_skel file contain the placeholder [WORD] which gets replaced by the actual token from the Treebank which is part of the OntoNotes release.
5 Part-of-Speech
6 Parse bit This is the bracketed structure broken before the first open parenthesis in the parse, and the word/part-of-speech leaf replaced with a *. The full parse can be created by substituting the asterisk with the \"([pos] [word])\" string (or leaf) and concatenating the items in the rows of that column.
7 Predicate lemma The predicate lemma is mentioned for the rows for which we have semantic role information. All other rows are marked with a \"-\"
8 Predicate Frameset ID This is the PropBank frameset ID of the predicate in Column 7.
9 Word sense This is the word sense of the word in Column 3.
10 Speaker/Author This is the speaker or author name where available. Mostly in Broadcast Conversation and Web Log data.
11 Named Entities These columns identifies the spans representing various named entities.
12:N Predicate Arguments There is one column each of predicate argument structure information for the predicate mentioned in Column 7.
N Coreference Coreference chain information encoded in a parenthesis structure.
More information on the format can be found here (section \"*_conll File Format\"): http://www.conll.cemantix.org/2012/data.html
Details on the evaluation on CoNLL can be found here: https://github.com/ns-moosavi/coval/blob/master/conll/README.md
CoVal code was written by @ns-moosavi.
Some parts are borrowed from https://github.com/clarkkev/deep-coref/blob/master/evaluation.py
The test suite is taken from https://github.com/conll/reference-coreference-scorers/
Mention evaluation and the test suite are added by @andreasvc.
Parsing CoNLL files is developed by Leo Born.
"""
A__ = """
Calculates coreference evaluation metrics.
Args:
predictions: list of sentences. Each sentence is a list of word predictions to score in the CoNLL format.
Each prediction is a word with its annotations as a string made of columns joined with spaces.
 Only columns 4, 5, 6 and the last column are used (word, POS, Parse and coreference annotation)
See the details on the format in the description of the metric.
references: list of sentences. Each sentence is a list of word reference to score in the CoNLL format.
Each reference is a word with its annotations as a string made of columns joined with spaces.
 Only columns 4, 5, 6 and the last column are used (word, POS, Parse and coreference annotation)
See the details on the format in the description of the metric.
keep_singletons: After extracting all mentions of key or system files,
mentions whose corresponding coreference chain is of size one,
are considered as singletons. The default evaluation mode will include
singletons in evaluations if they are included in the key or the system files.
By setting 'keep_singletons=False', all singletons in the key and system files
will be excluded from the evaluation.
NP_only: Most of the recent coreference resolvers only resolve NP mentions and
leave out the resolution of VPs. By setting the 'NP_only' option, the scorer will only evaluate the resolution of NPs.
min_span: By setting 'min_span', the scorer reports the results based on automatically detected minimum spans.
Minimum spans are determined using the MINA algorithm.
Returns:
'mentions': mentions
'muc': MUC metric [Vilain et al, 1995]
'bcub': B-cubed [Bagga and Baldwin, 1998]
'ceafe': CEAFe [Luo et al., 2005]
'lea': LEA [Moosavi and Strube, 2016]
'conll_score': averaged CoNLL score (the average of the F1 values of MUC, B-cubed and CEAFe)
Examples:
>>> coval = datasets.load_metric('coval')
>>> words = ['bc/cctv/00/cctv_0005 0 0 Thank VBP (TOP(S(VP* thank 01 1 Xu_li * (V*) * -',
... 'bc/cctv/00/cctv_0005 0 1 you PRP (NP*) - - - Xu_li * (ARG1*) (ARG0*) (116)',
... 'bc/cctv/00/cctv_0005 0 2 everyone NN (NP*) - - - Xu_li * (ARGM-DIS*) * (116)',
... 'bc/cctv/00/cctv_0005 0 3 for IN (PP* - - - Xu_li * (ARG2* * -',
... 'bc/cctv/00/cctv_0005 0 4 watching VBG (S(VP*)))) watch 01 1 Xu_li * *) (V*) -',
... 'bc/cctv/00/cctv_0005 0 5 . . *)) - - - Xu_li * * * -']
>>> references = [words]
>>> predictions = [words]
>>> results = coval.compute(predictions=predictions, references=references)
>>> print(results) # doctest:+ELLIPSIS
{'mentions/recall': 1.0,[...] 'conll_score': 100.0}
"""
def _UpperCAmelCase ( snake_case , snake_case , snake_case=False , snake_case=False , snake_case=True , snake_case=False , snake_case="dummy_doc" ):
"""simple docstring"""
_lowerCAmelCase = {doc: key_lines}
_lowerCAmelCase = {doc: sys_lines}
_lowerCAmelCase = {}
_lowerCAmelCase = 0
_lowerCAmelCase = 0
_lowerCAmelCase = 0
_lowerCAmelCase = 0
_lowerCAmelCase = 0
_lowerCAmelCase = 0
_lowerCAmelCase , _lowerCAmelCase = reader.get_doc_mentions(snake_case , key_doc_lines[doc] , snake_case )
key_singletons_num += singletons_num
if NP_only or min_span:
_lowerCAmelCase = reader.set_annotated_parse_trees(snake_case , key_doc_lines[doc] , snake_case , snake_case )
_lowerCAmelCase , _lowerCAmelCase = reader.get_doc_mentions(snake_case , sys_doc_lines[doc] , snake_case )
sys_singletons_num += singletons_num
if NP_only or min_span:
_lowerCAmelCase = reader.set_annotated_parse_trees(snake_case , key_doc_lines[doc] , snake_case , snake_case )
if remove_nested:
_lowerCAmelCase , _lowerCAmelCase = reader.remove_nested_coref_mentions(snake_case , snake_case )
key_nested_coref_num += nested_mentions
key_removed_nested_clusters += removed_clusters
_lowerCAmelCase , _lowerCAmelCase = reader.remove_nested_coref_mentions(snake_case , snake_case )
sys_nested_coref_num += nested_mentions
sys_removed_nested_clusters += removed_clusters
_lowerCAmelCase = reader.get_mention_assignments(snake_case , snake_case )
_lowerCAmelCase = reader.get_mention_assignments(snake_case , snake_case )
_lowerCAmelCase = (key_clusters, sys_clusters, key_mention_sys_cluster, sys_mention_key_cluster)
if remove_nested:
logger.info(
"""Number of removed nested coreferring mentions in the key """
F'annotation: {key_nested_coref_num}; and system annotation: {sys_nested_coref_num}' )
logger.info(
"""Number of resulting singleton clusters in the key """
F'annotation: {key_removed_nested_clusters}; and system annotation: {sys_removed_nested_clusters}' )
if not keep_singletons:
logger.info(
F'{key_singletons_num:d} and {sys_singletons_num:d} singletons are removed from the key and system '
"""files, respectively""" )
return doc_coref_infos
def _UpperCAmelCase ( snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case ):
"""simple docstring"""
_lowerCAmelCase = get_coref_infos(snake_case , snake_case , snake_case , snake_case , snake_case , snake_case )
_lowerCAmelCase = {}
_lowerCAmelCase = 0
_lowerCAmelCase = 0
for name, metric in metrics:
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = evaluator.evaluate_documents(snake_case , snake_case , beta=1 )
if name in ["muc", "bcub", "ceafe"]:
conll += fa
conll_subparts_num += 1
output_scores.update({F'{name}/recall': recall, F'{name}/precision': precision, F'{name}/f1': fa} )
logger.info(
name.ljust(10 ) , F'Recall: {recall * 1_00:.2f}' , F' Precision: {precision * 1_00:.2f}' , F' F1: {fa * 1_00:.2f}' , )
if conll_subparts_num == 3:
        conll = (conll / 3) * 1_00
logger.info(F'CoNLL score: {conll:.2f}' )
output_scores.update({"""conll_score""": conll} )
return output_scores
def _UpperCAmelCase ( snake_case ):
"""simple docstring"""
_lowerCAmelCase = False
for line in key_lines:
if not line.startswith("""#""" ):
if len(line.split() ) > 6:
_lowerCAmelCase = line.split()[5]
if not parse_col == "-":
_lowerCAmelCase = True
break
else:
break
return has_gold_parse
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __lowerCAmelCase ( datasets.Metric ):
def snake_case ( self ):
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Sequence(datasets.Value("""string""" ) ),
"""references""": datasets.Sequence(datasets.Value("""string""" ) ),
} ) , codebase_urls=["""https://github.com/ns-moosavi/coval"""] , reference_urls=[
"""https://github.com/ns-moosavi/coval""",
"""https://www.aclweb.org/anthology/P16-1060""",
"""http://www.conll.cemantix.org/2012/data.html""",
] , )
def snake_case ( self , _snake_case , _snake_case , _snake_case=True , _snake_case=False , _snake_case=False , _snake_case=False ):
"""simple docstring"""
_lowerCAmelCase = [
("""mentions""", evaluator.mentions),
("""muc""", evaluator.muc),
("""bcub""", evaluator.b_cubed),
("""ceafe""", evaluator.ceafe),
("""lea""", evaluator.lea),
]
if min_span:
_lowerCAmelCase = util.check_gold_parse_annotation(_snake_case )
if not has_gold_parse:
raise NotImplementedError("""References should have gold parse annotation to use 'min_span'.""" )
# util.parse_key_file(key_file)
# key_file = key_file + ".parsed"
_lowerCAmelCase = evaluate(
key_lines=_snake_case , sys_lines=_snake_case , metrics=_snake_case , NP_only=_snake_case , remove_nested=_snake_case , keep_singletons=_snake_case , min_span=_snake_case , )
return score
| 82 |
'''simple docstring'''
import inspect
import tempfile
import unittest
from huggingface_hub import hf_hub_download
from transformers import is_torch_available
from transformers.testing_utils import is_flaky, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
__SCREAMING_SNAKE_CASE :Dict = 1e-4
if is_torch_available():
import torch
from transformers import AutoformerConfig, AutoformerForPrediction, AutoformerModel
from transformers.models.autoformer.modeling_autoformer import AutoformerDecoder, AutoformerEncoder
@require_torch
class A_ :
def __init__( self : List[Any] , snake_case_ : int , snake_case_ : Dict=1_6 , snake_case_ : Dict=1_3 , snake_case_ : int=7 , snake_case_ : Any=1_4 , snake_case_ : int=1_0 , snake_case_ : Any=1_9 , snake_case_ : int=5 , snake_case_ : Any=4 , snake_case_ : Tuple=True , snake_case_ : Optional[int]=1_6 , snake_case_ : List[str]=2 , snake_case_ : Any=4 , snake_case_ : List[Any]=4 , snake_case_ : Optional[Any]="gelu" , snake_case_ : Optional[int]=0.1 , snake_case_ : Union[str, Any]=0.1 , snake_case_ : Tuple=[1, 2, 3, 4, 5] , snake_case_ : str=2_5 , snake_case_ : Any=5 , ):
_UpperCAmelCase = d_model
_UpperCAmelCase = parent
_UpperCAmelCase = batch_size
_UpperCAmelCase = prediction_length
_UpperCAmelCase = context_length
_UpperCAmelCase = cardinality
_UpperCAmelCase = num_time_features
_UpperCAmelCase = lags_sequence
_UpperCAmelCase = embedding_dimension
_UpperCAmelCase = is_training
_UpperCAmelCase = hidden_size
_UpperCAmelCase = num_hidden_layers
_UpperCAmelCase = num_attention_heads
_UpperCAmelCase = intermediate_size
_UpperCAmelCase = hidden_act
_UpperCAmelCase = hidden_dropout_prob
_UpperCAmelCase = attention_probs_dropout_prob
_UpperCAmelCase = context_length
_UpperCAmelCase = prediction_length + label_length
_UpperCAmelCase = label_length
_UpperCAmelCase = moving_average
_UpperCAmelCase = autocorrelation_factor
def lowercase ( self : Union[str, Any] ):
return AutoformerConfig(
d_model=self.d_model , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , prediction_length=self.prediction_length , context_length=self.context_length , label_length=self.label_length , lags_sequence=self.lags_sequence , num_time_features=self.num_time_features , num_static_categorical_features=1 , cardinality=[self.cardinality] , embedding_dimension=[self.embedding_dimension] , moving_average=self.moving_average , )
def lowercase ( self : int , snake_case_ : Optional[Any] ):
_UpperCAmelCase = config.context_length + max(config.lags_sequence )
_UpperCAmelCase = ids_tensor([self.batch_size, 1] , config.cardinality[0] )
_UpperCAmelCase = floats_tensor([self.batch_size, _past_length, config.num_time_features] )
_UpperCAmelCase = floats_tensor([self.batch_size, _past_length] )
_UpperCAmelCase = floats_tensor([self.batch_size, _past_length] ) > 0.5
# decoder inputs
_UpperCAmelCase = floats_tensor([self.batch_size, config.prediction_length, config.num_time_features] )
_UpperCAmelCase = floats_tensor([self.batch_size, config.prediction_length] )
_UpperCAmelCase = {
"past_values": past_values,
"static_categorical_features": static_categorical_features,
"past_time_features": past_time_features,
"past_observed_mask": past_observed_mask,
"future_time_features": future_time_features,
"future_values": future_values,
}
return inputs_dict
def lowercase ( self : List[Any] ):
_UpperCAmelCase = self.get_config()
_UpperCAmelCase = self.prepare_autoformer_inputs_dict(snake_case_ )
return config, inputs_dict
def lowercase ( self : List[str] ):
_UpperCAmelCase , _UpperCAmelCase = self.prepare_config_and_inputs()
return config, inputs_dict
def lowercase ( self : Optional[Any] , snake_case_ : int , snake_case_ : Optional[int] ):
_UpperCAmelCase = AutoformerModel(config=snake_case_ ).to(snake_case_ ).eval()
_UpperCAmelCase = model(**snake_case_ )
_UpperCAmelCase = outputs.encoder_last_hidden_state
_UpperCAmelCase = outputs.last_hidden_state
with tempfile.TemporaryDirectory() as tmpdirname:
_UpperCAmelCase = model.get_encoder()
encoder.save_pretrained(snake_case_ )
_UpperCAmelCase = AutoformerEncoder.from_pretrained(snake_case_ ).to(snake_case_ )
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = model.create_network_inputs(**snake_case_ )
_UpperCAmelCase , _UpperCAmelCase = model.decomposition_layer(transformer_inputs[:, : config.context_length, ...] )
_UpperCAmelCase = torch.cat(
(transformer_inputs[:, : config.context_length, ...], feature[:, : config.context_length, ...]) , dim=-1 , )
_UpperCAmelCase = encoder(inputs_embeds=snake_case_ )[0]
self.parent.assertTrue((encoder_last_hidden_state_a - encoder_last_hidden_state).abs().max().item() < 1e-3 )
_UpperCAmelCase = (
torch.mean(transformer_inputs[:, : config.context_length, ...] , dim=1 )
.unsqueeze(1 )
.repeat(1 , config.prediction_length , 1 )
)
_UpperCAmelCase = torch.zeros(
[transformer_inputs.shape[0], config.prediction_length, transformer_inputs.shape[2]] , device=enc_input.device , )
_UpperCAmelCase = torch.cat(
(
torch.cat((seasonal_input[:, -config.label_length :, ...], zeros) , dim=1 ),
feature[:, config.context_length - config.label_length :, ...],
) , dim=-1 , )
_UpperCAmelCase = torch.cat(
(
torch.cat((trend_input[:, -config.label_length :, ...], mean) , dim=1 ),
feature[:, config.context_length - config.label_length :, ...],
) , dim=-1 , )
with tempfile.TemporaryDirectory() as tmpdirname:
_UpperCAmelCase = model.get_decoder()
decoder.save_pretrained(snake_case_ )
_UpperCAmelCase = AutoformerDecoder.from_pretrained(snake_case_ ).to(snake_case_ )
_UpperCAmelCase = decoder(
trend=snake_case_ , inputs_embeds=snake_case_ , encoder_hidden_states=snake_case_ , )[0]
self.parent.assertTrue((last_hidden_state_a - last_hidden_state).abs().max().item() < 1e-3 )
@require_torch
class A_ ( lowerCAmelCase_ , lowerCAmelCase_ , unittest.TestCase ):
_lowerCamelCase : List[Any] = (AutoformerModel, AutoformerForPrediction) if is_torch_available() else ()
_lowerCamelCase : Tuple = (AutoformerForPrediction,) if is_torch_available() else ()
_lowerCamelCase : List[Any] = {"""feature-extraction""": AutoformerModel} if is_torch_available() else {}
_lowerCamelCase : Optional[Any] = False
_lowerCamelCase : Tuple = False
_lowerCamelCase : int = False
_lowerCamelCase : Optional[Any] = False
_lowerCamelCase : Optional[Any] = False
_lowerCamelCase : List[Any] = False
def lowercase ( self : Tuple ):
_UpperCAmelCase = AutoformerModelTester(self )
_UpperCAmelCase = ConfigTester(self , config_class=snake_case_ , has_text_modality=snake_case_ )
def lowercase ( self : Optional[Any] ):
self.config_tester.run_common_tests()
def lowercase ( self : Union[str, Any] ):
_UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
_UpperCAmelCase = model_class(snake_case_ )
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(snake_case_ )
_UpperCAmelCase , _UpperCAmelCase = model_class.from_pretrained(snake_case_ , output_loading_info=snake_case_ )
self.assertEqual(info["missing_keys"] , [] )
def lowercase ( self : Optional[int] ):
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_encoder_decoder_model_standalone(*snake_case_ )
@unittest.skip(reason="Model has no tokens embeddings" )
def lowercase ( self : Optional[int] ):
pass
def lowercase ( self : Optional[int] ):
_UpperCAmelCase = inspect.signature(getattr(snake_case_ , "forward" ) )
# The main input is the name of the argument after `self`
_UpperCAmelCase = list(model_signature.parameters.keys() )[1]
self.assertEqual(AutoformerModel.main_input_name , snake_case_ )
def lowercase ( self : List[str] ):
_UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCAmelCase = model_class(snake_case_ )
_UpperCAmelCase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_UpperCAmelCase = [*signature.parameters.keys()]
_UpperCAmelCase = [
"past_values",
"past_time_features",
"past_observed_mask",
"static_categorical_features",
"static_real_features",
"future_values",
"future_time_features",
]
if model.__class__.__name__ in ["AutoformerForPrediction"]:
expected_arg_names.append("future_observed_mask" )
expected_arg_names.extend(
[
"decoder_attention_mask",
"head_mask",
"decoder_head_mask",
"cross_attn_head_mask",
"encoder_outputs",
"past_key_values",
"output_hidden_states",
"output_attentions",
"use_cache",
"return_dict",
] )
self.assertListEqual(arg_names[: len(snake_case_ )] , snake_case_ )
def lowercase ( self : Optional[int] ):
_UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
_UpperCAmelCase = True
_UpperCAmelCase = getattr(self.model_tester , "seq_length" , snake_case_ )
_UpperCAmelCase = getattr(self.model_tester , "decoder_seq_length" , snake_case_ )
_UpperCAmelCase = getattr(self.model_tester , "encoder_seq_length" , snake_case_ )
_UpperCAmelCase = getattr(self.model_tester , "d_model" , snake_case_ )
_UpperCAmelCase = getattr(self.model_tester , "num_attention_heads" , snake_case_ )
_UpperCAmelCase = d_model // num_attention_heads
for model_class in self.all_model_classes:
_UpperCAmelCase = True
_UpperCAmelCase = False
_UpperCAmelCase = True
_UpperCAmelCase = model_class(snake_case_ )
model.to(snake_case_ )
model.eval()
with torch.no_grad():
_UpperCAmelCase = model(**self._prepare_for_class(snake_case_ , snake_case_ ) )
_UpperCAmelCase = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(snake_case_ ) , self.model_tester.num_hidden_layers )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
_UpperCAmelCase = True
_UpperCAmelCase = model_class(snake_case_ )
model.to(snake_case_ )
model.eval()
with torch.no_grad():
_UpperCAmelCase = model(**self._prepare_for_class(snake_case_ , snake_case_ ) )
_UpperCAmelCase = outputs.encoder_attentions
self.assertEqual(len(snake_case_ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, dim] , )
_UpperCAmelCase = len(snake_case_ )
_UpperCAmelCase = 7
if "last_hidden_state" in outputs:
correct_outlen += 1
if "trend" in outputs:
correct_outlen += 1
if "past_key_values" in outputs:
correct_outlen += 1 # past_key_values have been returned
if "loss" in outputs:
correct_outlen += 1
if "params" in outputs:
correct_outlen += 1
self.assertEqual(snake_case_ , snake_case_ )
# decoder attentions
_UpperCAmelCase = outputs.decoder_attentions
self.assertIsInstance(snake_case_ , (list, tuple) )
self.assertEqual(len(snake_case_ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, decoder_seq_length, dim] , )
# cross attentions
_UpperCAmelCase = outputs.cross_attentions
self.assertIsInstance(snake_case_ , (list, tuple) )
self.assertEqual(len(snake_case_ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(cross_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, decoder_seq_length, dim] , )
# Check attention is always last and order is fine
_UpperCAmelCase = True
_UpperCAmelCase = True
_UpperCAmelCase = model_class(snake_case_ )
model.to(snake_case_ )
model.eval()
with torch.no_grad():
_UpperCAmelCase = model(**self._prepare_for_class(snake_case_ , snake_case_ ) )
self.assertEqual(out_len + 2 , len(snake_case_ ) )
_UpperCAmelCase = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(snake_case_ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, dim] , )
@is_flaky()
def lowercase ( self : Dict ):
super().test_retain_grad_hidden_states_attentions()
def UpperCAmelCase_ ( __lowercase : str="train-batch.pt" ) -> List[str]:
'''simple docstring'''
_UpperCAmelCase = hf_hub_download(repo_id="hf-internal-testing/tourism-monthly-batch" , filename=__lowercase , repo_type="dataset" )
_UpperCAmelCase = torch.load(__lowercase , map_location=__lowercase )
return batch
@require_torch
@slow
class A_ ( unittest.TestCase ):
def lowercase ( self : Optional[int] ):
_UpperCAmelCase = AutoformerModel.from_pretrained("huggingface/autoformer-tourism-monthly" ).to(snake_case_ )
_UpperCAmelCase = prepare_batch()
with torch.no_grad():
_UpperCAmelCase = model(
past_values=batch["past_values"] , past_time_features=batch["past_time_features"] , past_observed_mask=batch["past_observed_mask"] , static_categorical_features=batch["static_categorical_features"] , future_values=batch["future_values"] , future_time_features=batch["future_time_features"] , )[0]
_UpperCAmelCase = torch.Size(
(6_4, model.config.prediction_length + model.config.label_length, model.config.feature_size) )
self.assertEqual(output.shape , snake_case_ )
_UpperCAmelCase = torch.tensor(
[[0.3_5_9_3, -1.3_3_9_8, 0.6_3_3_0], [0.2_2_7_9, 1.5_3_9_6, -0.1_7_9_2], [0.0_4_5_0, 1.3_2_2_5, -0.2_3_3_5]] , device=snake_case_ )
self.assertTrue(torch.allclose(output[0, :3, :3] , snake_case_ , atol=snake_case_ ) )
def lowercase ( self : Optional[Any] ):
_UpperCAmelCase = AutoformerForPrediction.from_pretrained("huggingface/autoformer-tourism-monthly" ).to(snake_case_ )
_UpperCAmelCase = prepare_batch("val-batch.pt" )
with torch.no_grad():
_UpperCAmelCase = model(
past_values=batch["past_values"] , past_time_features=batch["past_time_features"] , past_observed_mask=batch["past_observed_mask"] , static_categorical_features=batch["static_categorical_features"] , ).encoder_last_hidden_state
_UpperCAmelCase = torch.Size((6_4, model.config.context_length, model.config.d_model) )
self.assertEqual(output.shape , snake_case_ )
_UpperCAmelCase = torch.tensor(
[[-0.0_7_3_4, -0.9_0_3_6, 0.8_3_5_8], [4.7_1_8_6, 2.4_1_1_3, 1.9_5_8_1], [1.7_9_5_3, 2.3_5_5_8, 1.2_9_7_0]] , device=snake_case_ )
self.assertTrue(torch.allclose(output[0, :3, :3] , snake_case_ , atol=snake_case_ ) )
def lowercase ( self : Tuple ):
_UpperCAmelCase = AutoformerForPrediction.from_pretrained("huggingface/autoformer-tourism-monthly" ).to(snake_case_ )
_UpperCAmelCase = prepare_batch("val-batch.pt" )
with torch.no_grad():
_UpperCAmelCase = model.generate(
static_categorical_features=batch["static_categorical_features"] , past_time_features=batch["past_time_features"] , past_values=batch["past_values"] , future_time_features=batch["future_time_features"] , past_observed_mask=batch["past_observed_mask"] , )
_UpperCAmelCase = torch.Size((6_4, model.config.num_parallel_samples, model.config.prediction_length) )
self.assertEqual(outputs.sequences.shape , snake_case_ )
_UpperCAmelCase = torch.tensor([3_1_3_0.6_7_6_3, 4_0_5_6.5_2_9_3, 7_0_5_3.0_7_8_6] , device=snake_case_ )
_UpperCAmelCase = outputs.sequences.mean(dim=1 )
self.assertTrue(torch.allclose(mean_prediction[0, -3:] , snake_case_ , rtol=1e-1 ) )
| 22 | 0 |
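# A minimal sketch of the save/reload-and-compare pattern the standalone
# encoder/decoder test above relies on. `check_save_reload_equivalence` and the
# `nn.Linear` example are illustrative stand-ins, not part of the test suite.
import os
import tempfile

import torch
import torch.nn as nn


def check_save_reload_equivalence(module, make_module, example_input, atol=1e-3):
    # run the reference module, dump its weights, rebuild a fresh instance,
    # reload the weights, and assert both produce the same output
    module.eval()
    with torch.no_grad():
        reference = module(example_input)
    with tempfile.TemporaryDirectory() as tmpdir:
        path = os.path.join(tmpdir, "weights.pt")
        torch.save(module.state_dict(), path)
        restored = make_module()
        restored.load_state_dict(torch.load(path))
        restored.eval()
        with torch.no_grad():
            output = restored(example_input)
    assert (reference - output).abs().max().item() < atol


check_save_reload_equivalence(nn.Linear(8, 8), lambda: nn.Linear(8, 8), torch.randn(2, 8))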
'''simple docstring'''
import argparse
import os
from pathlib import Path
from typing import Dict
import tensorflow as tf
import torch
from tqdm import tqdm
from transformers import PegasusConfig, PegasusForConditionalGeneration, PegasusTokenizer
from transformers.models.pegasus.configuration_pegasus import DEFAULTS, task_specific_params
snake_case_ : Optional[int] = [
# replace left string with right string to get the relevant state_dict key (identical state dict to bart)
['memory_attention', 'encoder_attn'],
['attention', 'attn'],
['/', '.'],
['.LayerNorm.gamma', '_layer_norm.weight'],
['.LayerNorm.beta', '_layer_norm.bias'],
['r.layer_', 'r.layers.'],
['output_proj', 'out_proj'],
['ffn.dense_1.', 'fc2.'],
['ffn.dense.', 'fc1.'],
['ffn_layer_norm', 'final_layer_norm'],
['kernel', 'weight'],
['encoder_layer_norm.', 'encoder.layer_norm.'],
['decoder_layer_norm.', 'decoder.layer_norm.'],
['embeddings.weights', 'shared.weight'],
]
def A__ ( UpperCAmelCase_ ):
for pegasus_name, hf_name in PATTERNS:
_UpperCamelCase : Optional[Any] = k.replace(UpperCAmelCase_ , UpperCAmelCase_ )
return k
def A__ ( UpperCAmelCase_ , UpperCAmelCase_ ):
_UpperCamelCase : List[Any] = DEFAULTS.copy()
cfg_kwargs.update(UpperCAmelCase_ )
_UpperCamelCase : Any = PegasusConfig(**UpperCAmelCase_ )
_UpperCamelCase : int = PegasusForConditionalGeneration(UpperCAmelCase_ )
_UpperCamelCase : int = torch_model.model.state_dict()
_UpperCamelCase : Optional[int] = {}
for k, v in tf_weights.items():
_UpperCamelCase : Optional[int] = rename_state_dict_key(UpperCAmelCase_ )
if new_k not in sd:
raise ValueError(f'could not find new key {new_k} in state dict. (converted from {k})' )
if "dense" in k or "proj" in new_k:
_UpperCamelCase : int = v.T
_UpperCamelCase : List[str] = torch.tensor(UpperCAmelCase_ , dtype=sd[new_k].dtype )
assert v.shape == sd[new_k].shape, f'{new_k}, {k}, {v.shape}, {sd[new_k].shape}'
# make sure embedding.padding_idx is respected
_UpperCamelCase : List[Any] = torch.zeros_like(mapping['shared.weight'][cfg.pad_token_id + 1] )
_UpperCamelCase : Any = mapping['shared.weight']
_UpperCamelCase : Optional[Any] = mapping['shared.weight']
_UpperCamelCase : str = {k: torch.zeros_like(UpperCAmelCase_ ) for k, v in sd.items() if k.endswith('bias' ) and k not in mapping}
mapping.update(**UpperCAmelCase_ )
_UpperCamelCase , _UpperCamelCase : int = torch_model.model.load_state_dict(UpperCAmelCase_ , strict=UpperCAmelCase_ )
_UpperCamelCase : Optional[Any] = [
k for k in missing if k not in ['encoder.embed_positions.weight', 'decoder.embed_positions.weight']
]
assert unexpected_missing == [], f'no matches found for the following torch keys {unexpected_missing}'
assert extra == [], f'no matches found for the following tf keys {extra}'
return torch_model
def A__ ( UpperCAmelCase_="./ckpt/aeslc/model.ckpt-32000" ):
_UpperCamelCase : str = tf.train.list_variables(UpperCAmelCase_ )
_UpperCamelCase : int = {}
_UpperCamelCase : Optional[int] = ['Adafactor', 'global_step']
for name, shape in tqdm(UpperCAmelCase_ , desc='converting tf checkpoint to dict' ):
_UpperCamelCase : str = any(pat in name for pat in ignore_name )
if skip_key:
continue
_UpperCamelCase : List[str] = tf.train.load_variable(UpperCAmelCase_ , UpperCAmelCase_ )
_UpperCamelCase : Tuple = array
return tf_weights
def A__ ( UpperCAmelCase_ , UpperCAmelCase_ ):
# save tokenizer first
_UpperCamelCase : Optional[int] = Path(UpperCAmelCase_ ).parent.name
_UpperCamelCase : Dict = task_specific_params[f'summarization_{dataset}']['max_position_embeddings']
_UpperCamelCase : Tuple = PegasusTokenizer.from_pretrained('sshleifer/pegasus' , model_max_length=UpperCAmelCase_ )
assert tok.model_max_length == desired_max_model_length
tok.save_pretrained(UpperCAmelCase_ )
# convert model
_UpperCamelCase : Optional[Any] = get_tf_weights_as_numpy(UpperCAmelCase_ )
_UpperCamelCase : Optional[Any] = task_specific_params[f'summarization_{dataset}']
if dataset == "large":
_UpperCamelCase : Optional[int] = task_specific_params
_UpperCamelCase : int = convert_pegasus(UpperCAmelCase_ , UpperCAmelCase_ )
torch_model.save_pretrained(UpperCAmelCase_ )
_UpperCamelCase : List[str] = torch_model.state_dict()
sd.pop('model.decoder.embed_positions.weight' )
sd.pop('model.encoder.embed_positions.weight' )
torch.save(UpperCAmelCase_ , Path(UpperCAmelCase_ ) / 'pytorch_model.bin' )
if __name__ == "__main__":
snake_case_ : List[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument('tf_ckpt_path', type=str, help='passed to tf.train.list_variables')
parser.add_argument('save_dir', default=None, type=str, help='Path to the output PyTorch model.')
snake_case_ : Optional[int] = parser.parse_args()
if args.save_dir is None:
snake_case_ : List[str] = Path(args.tf_ckpt_path).parent.name
snake_case_ : Optional[int] = os.path.join('pegasus', dataset)
convert_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir)
| 83 |
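# A self-contained sketch of the ordered find/replace renaming the Pegasus
# conversion above performs over state-dict keys via PATTERNS; this pattern
# subset and the sample key are illustrative.
RENAME_PATTERNS = [
    ("memory_attention", "encoder_attn"),  # must run before the broader rule below
    ("attention", "attn"),
    ("/", "."),
    ("kernel", "weight"),
]


def rename_key(key):
    # apply the substitutions in order; ordering prevents "memory_attention"
    # from being mangled by the generic "attention" -> "attn" rule
    for old, new in RENAME_PATTERNS:
        key = key.replace(old, new)
    return key


print(rename_key("decoder/memory_attention/kernel"))  # decoder.encoder_attn.weight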
'''simple docstring'''
from .integrations import (
is_optuna_available,
is_ray_available,
is_sigopt_available,
is_wandb_available,
run_hp_search_optuna,
run_hp_search_ray,
run_hp_search_sigopt,
run_hp_search_wandb,
)
from .trainer_utils import (
HPSearchBackend,
default_hp_space_optuna,
default_hp_space_ray,
default_hp_space_sigopt,
default_hp_space_wandb,
)
from .utils import logging
__SCREAMING_SNAKE_CASE :int = logging.get_logger(__name__)
class A_ :
_lowerCamelCase : str
_lowerCamelCase : str = None
@staticmethod
def lowercase ( ):
raise NotImplementedError
def lowercase ( self : Union[str, Any] , snake_case_ : Optional[int] , snake_case_ : int , snake_case_ : str , **snake_case_ : List[Any] ):
raise NotImplementedError
def lowercase ( self : Any , snake_case_ : int ):
raise NotImplementedError
def lowercase ( self : List[str] ):
if not self.is_available():
raise RuntimeError(
f'You picked the {self.name} backend, but it is not installed. Run {self.pip_install()}.' )
@classmethod
def lowercase ( cls : List[Any] ):
return f'`pip install {cls.pip_package or cls.name}`'
class A_ ( lowerCAmelCase_ ):
_lowerCamelCase : int = """optuna"""
@staticmethod
def lowercase ( ):
return is_optuna_available()
def lowercase ( self : List[str] , snake_case_ : Any , snake_case_ : int , snake_case_ : str , **snake_case_ : Tuple ):
return run_hp_search_optuna(snake_case_ , snake_case_ , snake_case_ , **snake_case_ )
def lowercase ( self : int , snake_case_ : Optional[int] ):
return default_hp_space_optuna(snake_case_ )
class A_ ( lowerCAmelCase_ ):
_lowerCamelCase : Any = """ray"""
_lowerCamelCase : Tuple = """'ray[tune]'"""
@staticmethod
def lowercase ( ):
return is_ray_available()
def lowercase ( self : Optional[Any] , snake_case_ : Any , snake_case_ : int , snake_case_ : str , **snake_case_ : List[str] ):
return run_hp_search_ray(snake_case_ , snake_case_ , snake_case_ , **snake_case_ )
def lowercase ( self : Any , snake_case_ : str ):
return default_hp_space_ray(snake_case_ )
class A_ ( lowerCAmelCase_ ):
_lowerCamelCase : int = """sigopt"""
@staticmethod
def lowercase ( ):
return is_sigopt_available()
def lowercase ( self : Any , snake_case_ : int , snake_case_ : int , snake_case_ : str , **snake_case_ : Dict ):
return run_hp_search_sigopt(snake_case_ , snake_case_ , snake_case_ , **snake_case_ )
def lowercase ( self : Dict , snake_case_ : Optional[Any] ):
return default_hp_space_sigopt(snake_case_ )
class A_ ( lowerCAmelCase_ ):
_lowerCamelCase : Optional[int] = """wandb"""
@staticmethod
def lowercase ( ):
return is_wandb_available()
def lowercase ( self : Optional[Any] , snake_case_ : Optional[Any] , snake_case_ : int , snake_case_ : str , **snake_case_ : Optional[Any] ):
return run_hp_search_wandb(snake_case_ , snake_case_ , snake_case_ , **snake_case_ )
def lowercase ( self : Any , snake_case_ : Union[str, Any] ):
return default_hp_space_wandb(snake_case_ )
__SCREAMING_SNAKE_CASE :Dict = {
HPSearchBackend(backend.name): backend for backend in [OptunaBackend, RayTuneBackend, SigOptBackend, WandbBackend]
}
def UpperCAmelCase_ ( ) -> str:
'''simple docstring'''
_UpperCAmelCase = [backend for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values() if backend.is_available()]
if len(__lowercase ) > 0:
_UpperCAmelCase = available_backends[0].name
if len(__lowercase ) > 1:
logger.info(
f'{len(__lowercase )} hyperparameter search backends available. Using {name} as the default.' )
return name
raise RuntimeError(
"No hyperparameter search backend available.\n"
+ "\n".join(
f' - To install {backend.name} run {backend.pip_install()}'
for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values() ) )
| 22 | 0 |
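# A compact sketch of the backend-registry pattern above: each backend exposes
# a name, an availability probe, and an install hint, and the first available
# backend becomes the default. The class and registry here are illustrative.
import importlib.util


class Backend:
    name = ""
    pip_package = None

    @staticmethod
    def is_available():
        raise NotImplementedError

    @classmethod
    def pip_install(cls):
        return f"`pip install {cls.pip_package or cls.name}`"


class OptunaBackend(Backend):
    name = "optuna"

    @staticmethod
    def is_available():
        return importlib.util.find_spec("optuna") is not None


ALL_BACKENDS = {b.name: b for b in [OptunaBackend]}


def default_backend_name():
    available = [b for b in ALL_BACKENDS.values() if b.is_available()]
    if available:
        return available[0].name
    raise RuntimeError(
        "No backend available.\n"
        + "\n".join(f" - to install {b.name} run {b.pip_install()}" for b in ALL_BACKENDS.values())
    )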
"""simple docstring"""
import argparse
import json
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from typing import List
import timm
import torch
import torch.nn as nn
from huggingface_hub import hf_hub_download
from torch import Tensor
from transformers import AutoImageProcessor, ResNetConfig, ResNetForImageClassification
from transformers.utils import logging
logging.set_verbosity_info()
__UpperCAmelCase = logging.get_logger()
@dataclass
class _SCREAMING_SNAKE_CASE :
UpperCAmelCase_ :nn.Module
UpperCAmelCase_ :List[nn.Module] = field(default_factory=A__ )
UpperCAmelCase_ :list = field(default_factory=A__ )
def __lowerCAmelCase ( self , __A , __A , __A ) -> List[Any]:
lowerCAmelCase_ :Optional[int] = len(list(m.modules() ) ) == 1 or isinstance(__A , nn.Convad ) or isinstance(__A , nn.BatchNormad )
if has_not_submodules:
self.traced.append(__A )
def __call__( self , __A ) -> Union[str, Any]:
for m in self.module.modules():
self.handles.append(m.register_forward_hook(self._forward_hook ) )
self.module(__A )
[x.remove() for x in self.handles]
return self
@property
def __lowerCAmelCase ( self ) -> List[str]:
# check the len of the state_dict keys to see if we have learnable params
return list(filter(lambda __A : len(list(x.state_dict().keys() ) ) > 0 , self.traced ) )
@dataclass
class _SCREAMING_SNAKE_CASE :
UpperCAmelCase_ :nn.Module
UpperCAmelCase_ :nn.Module
UpperCAmelCase_ :int = 0
UpperCAmelCase_ :List = field(default_factory=A__ )
UpperCAmelCase_ :List = field(default_factory=A__ )
def __call__( self , __A ) -> List[str]:
lowerCAmelCase_ :Tuple = Tracker(self.dest )(__A ).parametrized
lowerCAmelCase_ :Optional[Any] = Tracker(self.src )(__A ).parametrized
lowerCAmelCase_ :Optional[Any] = list(filter(lambda __A : type(__A ) not in self.src_skip , __A ) )
lowerCAmelCase_ :Dict = list(filter(lambda __A : type(__A ) not in self.dest_skip , __A ) )
if len(__A ) != len(__A ):
raise Exception(
f"""Numbers of operations are different. Source module has {len(__A )} operations while"""
f""" destination module has {len(__A )}.""" )
for dest_m, src_m in zip(__A , __A ):
dest_m.load_state_dict(src_m.state_dict() )
if self.verbose == 1:
                print(f"""Transferred from={src_m} to={dest_m}""" )
def _snake_case ( lowercase__ : str , lowercase__ : ResNetConfig , lowercase__ : Path , lowercase__ : bool = True ) -> Union[str, Any]:
'''simple docstring'''
print(f"""Converting {name}...""" )
with torch.no_grad():
lowerCAmelCase_ :int = timm.create_model(lowercase__ , pretrained=lowercase__ ).eval()
lowerCAmelCase_ :Tuple = ResNetForImageClassification(lowercase__ ).eval()
lowerCAmelCase_ :List[Any] = ModuleTransfer(src=lowercase__ , dest=lowercase__ )
lowerCAmelCase_ :str = torch.randn((1, 3, 2_2_4, 2_2_4) )
module_transfer(lowercase__ )
assert torch.allclose(from_model(lowercase__ ) , our_model(lowercase__ ).logits ), "The model logits don't match the original one."
lowerCAmelCase_ :Optional[Any] = f"""resnet{"-".join(name.split("resnet" ) )}"""
print(lowercase__ )
if push_to_hub:
our_model.push_to_hub(
repo_path_or_name=save_directory / checkpoint_name , commit_message="""Add model""" , use_temp_dir=lowercase__ , )
# we can use the convnext one
lowerCAmelCase_ :Optional[int] = AutoImageProcessor.from_pretrained("""facebook/convnext-base-224-22k-1k""" )
image_processor.push_to_hub(
repo_path_or_name=save_directory / checkpoint_name , commit_message="""Add image processor""" , use_temp_dir=lowercase__ , )
print(f"""Pushed {checkpoint_name}""" )
def _snake_case ( lowercase__ : Path , lowercase__ : str = None , lowercase__ : bool = True ) -> List[Any]:
'''simple docstring'''
lowerCAmelCase_ :Union[str, Any] = """imagenet-1k-id2label.json"""
lowerCAmelCase_ :Optional[Any] = 1_0_0_0
lowerCAmelCase_ :Union[str, Any] = (1, num_labels)
lowerCAmelCase_ :str = """huggingface/label-files"""
lowerCAmelCase_ :Optional[Any] = num_labels
lowerCAmelCase_ :Dict = json.load(open(hf_hub_download(lowercase__ , lowercase__ , repo_type="""dataset""" ) , """r""" ) )
lowerCAmelCase_ :int = {int(lowercase__ ): v for k, v in idalabel.items()}
lowerCAmelCase_ :Dict = idalabel
lowerCAmelCase_ :Dict = {v: k for k, v in idalabel.items()}
lowerCAmelCase_ :Any = partial(lowercase__ , num_labels=lowercase__ , idalabel=lowercase__ , labelaid=lowercase__ )
lowerCAmelCase_ :str = {
"""resnet18""": ImageNetPreTrainedConfig(
depths=[2, 2, 2, 2] , hidden_sizes=[6_4, 1_2_8, 2_5_6, 5_1_2] , layer_type="""basic""" ),
"""resnet26""": ImageNetPreTrainedConfig(
depths=[2, 2, 2, 2] , hidden_sizes=[2_5_6, 5_1_2, 1_0_2_4, 2_0_4_8] , layer_type="""bottleneck""" ),
"""resnet34""": ImageNetPreTrainedConfig(
depths=[3, 4, 6, 3] , hidden_sizes=[6_4, 1_2_8, 2_5_6, 5_1_2] , layer_type="""basic""" ),
"""resnet50""": ImageNetPreTrainedConfig(
depths=[3, 4, 6, 3] , hidden_sizes=[2_5_6, 5_1_2, 1_0_2_4, 2_0_4_8] , layer_type="""bottleneck""" ),
"""resnet101""": ImageNetPreTrainedConfig(
depths=[3, 4, 2_3, 3] , hidden_sizes=[2_5_6, 5_1_2, 1_0_2_4, 2_0_4_8] , layer_type="""bottleneck""" ),
"""resnet152""": ImageNetPreTrainedConfig(
depths=[3, 8, 3_6, 3] , hidden_sizes=[2_5_6, 5_1_2, 1_0_2_4, 2_0_4_8] , layer_type="""bottleneck""" ),
}
if model_name:
convert_weight_and_push(lowercase__ , names_to_config[model_name] , lowercase__ , lowercase__ )
else:
for model_name, config in names_to_config.items():
convert_weight_and_push(lowercase__ , lowercase__ , lowercase__ , lowercase__ )
return config, expected_shape
if __name__ == "__main__":
__UpperCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default=None,
type=str,
help=(
'The name of the model you wish to convert, it must be one of the supported resnet* architecture,'
' currently: resnet18,26,34,50,101,152. If `None`, all of them will the converted.'
),
)
parser.add_argument(
'--pytorch_dump_folder_path',
default=None,
type=Path,
required=True,
help='Path to the output PyTorch model directory.',
)
parser.add_argument(
'--push_to_hub',
default=True,
type=bool,
required=False,
help='If True, push model and image processor to the hub.',
)
__UpperCAmelCase = parser.parse_args()
__UpperCAmelCase = args.pytorch_dump_folder_path
pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
| 84 |
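# A minimal sketch of the forward-hook "Tracker" idea the conversion script
# above uses to align timm and HF layers: hook every leaf module, run one
# forward pass, and keep the parametrized modules in execution order. This is a
# simplified stand-in, not the script's exact class.
import torch
import torch.nn as nn


def trace_parametrized_leaves(model, example_input):
    traced, handles = [], []

    def hook(module, inputs, output):
        traced.append(module)

    for m in model.modules():
        if len(list(m.children())) == 0:  # leaf modules only
            handles.append(m.register_forward_hook(hook))
    with torch.no_grad():
        model(example_input)
    for h in handles:
        h.remove()
    # mirror the `parametrized` filter: keep modules that own learnable params
    return [m for m in traced if len(list(m.state_dict().keys())) > 0]


leaves = trace_parametrized_leaves(
    nn.Sequential(nn.Conv2d(3, 8, 3), nn.ReLU(), nn.Flatten()), torch.randn(1, 3, 8, 8)
)
print([type(m).__name__ for m in leaves])  # ['Conv2d']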
'''simple docstring'''
__SCREAMING_SNAKE_CASE :List[str] = '''0.18.2'''
from .configuration_utils import ConfigMixin
from .utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_inflect_available,
is_invisible_watermark_available,
is_k_diffusion_available,
is_k_diffusion_version,
is_librosa_available,
is_note_seq_available,
is_onnx_available,
is_scipy_available,
is_torch_available,
is_torchsde_available,
is_transformers_available,
is_transformers_version,
is_unidecode_available,
logging,
)
try:
if not is_onnx_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_onnx_objects import * # noqa F403
else:
from .pipelines import OnnxRuntimeModel
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_pt_objects import * # noqa F403
else:
from .models import (
AutoencoderKL,
ControlNetModel,
ModelMixin,
PriorTransformer,
TaFilmDecoder,
TransformeraDModel,
UNetaDModel,
UNetaDConditionModel,
VQModel,
)
from .optimization import (
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
get_scheduler,
)
from .pipelines import (
AudioPipelineOutput,
ConsistencyModelPipeline,
DanceDiffusionPipeline,
DDIMPipeline,
DDPMPipeline,
DiffusionPipeline,
DiTPipeline,
ImagePipelineOutput,
KarrasVePipeline,
LDMPipeline,
LDMSuperResolutionPipeline,
PNDMPipeline,
RePaintPipeline,
ScoreSdeVePipeline,
)
from .schedulers import (
CMStochasticIterativeScheduler,
DDIMInverseScheduler,
DDIMParallelScheduler,
DDIMScheduler,
DDPMParallelScheduler,
DDPMScheduler,
DEISMultistepScheduler,
DPMSolverMultistepInverseScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
HeunDiscreteScheduler,
IPNDMScheduler,
KarrasVeScheduler,
KDPMaAncestralDiscreteScheduler,
KDPMaDiscreteScheduler,
PNDMScheduler,
RePaintScheduler,
SchedulerMixin,
ScoreSdeVeScheduler,
UnCLIPScheduler,
UniPCMultistepScheduler,
VQDiffusionScheduler,
)
from .training_utils import EMAModel
try:
if not (is_torch_available() and is_scipy_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_scipy_objects import * # noqa F403
else:
from .schedulers import LMSDiscreteScheduler
try:
if not (is_torch_available() and is_torchsde_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_torchsde_objects import * # noqa F403
else:
from .schedulers import DPMSolverSDEScheduler
try:
if not (is_torch_available() and is_transformers_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipelines import (
AltDiffusionImgaImgPipeline,
AltDiffusionPipeline,
AudioLDMPipeline,
CycleDiffusionPipeline,
IFImgaImgPipeline,
IFImgaImgSuperResolutionPipeline,
IFInpaintingPipeline,
IFInpaintingSuperResolutionPipeline,
IFPipeline,
IFSuperResolutionPipeline,
ImageTextPipelineOutput,
KandinskyImgaImgPipeline,
KandinskyInpaintPipeline,
KandinskyPipeline,
KandinskyPriorPipeline,
KandinskyVaaControlnetImgaImgPipeline,
KandinskyVaaControlnetPipeline,
KandinskyVaaImgaImgPipeline,
KandinskyVaaInpaintPipeline,
KandinskyVaaPipeline,
KandinskyVaaPriorEmbaEmbPipeline,
KandinskyVaaPriorPipeline,
LDMTextToImagePipeline,
PaintByExamplePipeline,
SemanticStableDiffusionPipeline,
ShapEImgaImgPipeline,
ShapEPipeline,
StableDiffusionAttendAndExcitePipeline,
StableDiffusionControlNetImgaImgPipeline,
StableDiffusionControlNetInpaintPipeline,
StableDiffusionControlNetPipeline,
StableDiffusionDepthaImgPipeline,
StableDiffusionDiffEditPipeline,
StableDiffusionImageVariationPipeline,
StableDiffusionImgaImgPipeline,
StableDiffusionInpaintPipeline,
StableDiffusionInpaintPipelineLegacy,
StableDiffusionInstructPixaPixPipeline,
StableDiffusionLatentUpscalePipeline,
StableDiffusionLDMaDPipeline,
StableDiffusionModelEditingPipeline,
StableDiffusionPanoramaPipeline,
StableDiffusionParadigmsPipeline,
StableDiffusionPipeline,
StableDiffusionPipelineSafe,
StableDiffusionPixaPixZeroPipeline,
StableDiffusionSAGPipeline,
StableDiffusionUpscalePipeline,
StableUnCLIPImgaImgPipeline,
StableUnCLIPPipeline,
TextToVideoSDPipeline,
TextToVideoZeroPipeline,
UnCLIPImageVariationPipeline,
UnCLIPPipeline,
UniDiffuserModel,
UniDiffuserPipeline,
UniDiffuserTextDecoder,
VersatileDiffusionDualGuidedPipeline,
VersatileDiffusionImageVariationPipeline,
VersatileDiffusionPipeline,
VersatileDiffusionTextToImagePipeline,
VideoToVideoSDPipeline,
VQDiffusionPipeline,
)
try:
if not (is_torch_available() and is_transformers_available() and is_invisible_watermark_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_invisible_watermark_objects import * # noqa F403
else:
from .pipelines import StableDiffusionXLImgaImgPipeline, StableDiffusionXLPipeline
try:
if not (is_torch_available() and is_transformers_available() and is_k_diffusion_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_k_diffusion_objects import * # noqa F403
else:
from .pipelines import StableDiffusionKDiffusionPipeline
try:
if not (is_torch_available() and is_transformers_available() and is_onnx_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_onnx_objects import * # noqa F403
else:
from .pipelines import (
OnnxStableDiffusionImgaImgPipeline,
OnnxStableDiffusionInpaintPipeline,
OnnxStableDiffusionInpaintPipelineLegacy,
OnnxStableDiffusionPipeline,
OnnxStableDiffusionUpscalePipeline,
StableDiffusionOnnxPipeline,
)
try:
if not (is_torch_available() and is_librosa_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_librosa_objects import * # noqa F403
else:
from .pipelines import AudioDiffusionPipeline, Mel
try:
if not (is_transformers_available() and is_torch_available() and is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_transformers_and_torch_and_note_seq_objects import * # noqa F403
else:
from .pipelines import SpectrogramDiffusionPipeline
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_flax_objects import * # noqa F403
else:
from .models.controlnet_flax import FlaxControlNetModel
from .models.modeling_flax_utils import FlaxModelMixin
from .models.unet_ad_condition_flax import FlaxUNetaDConditionModel
from .models.vae_flax import FlaxAutoencoderKL
from .pipelines import FlaxDiffusionPipeline
from .schedulers import (
FlaxDDIMScheduler,
FlaxDDPMScheduler,
FlaxDPMSolverMultistepScheduler,
FlaxKarrasVeScheduler,
FlaxLMSDiscreteScheduler,
FlaxPNDMScheduler,
FlaxSchedulerMixin,
FlaxScoreSdeVeScheduler,
)
try:
if not (is_flax_available() and is_transformers_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_flax_and_transformers_objects import * # noqa F403
else:
from .pipelines import (
FlaxStableDiffusionControlNetPipeline,
FlaxStableDiffusionImgaImgPipeline,
FlaxStableDiffusionInpaintPipeline,
FlaxStableDiffusionPipeline,
)
try:
if not (is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_note_seq_objects import * # noqa F403
else:
from .pipelines import MidiProcessor
| 22 | 0 |
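# A self-contained sketch of the optional-dependency guard used throughout the
# __init__ above: probe for the extra and fall back to a dummy object that
# fails loudly on use. The stub class stands in for the real `.models` import.
import importlib.util


class OptionalDependencyNotAvailable(Exception):
    pass


def is_torch_available():
    return importlib.util.find_spec("torch") is not None


try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    class AutoencoderKL:  # dummy: importable, but unusable without torch
        def __init__(self, *args, **kwargs):
            raise ImportError("AutoencoderKL requires torch; run `pip install torch`.")
else:
    class AutoencoderKL:  # in the real package: `from .models import AutoencoderKL`
        pass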
'''simple docstring'''
# Author: OMKAR PATHAK, Nwachukwu Chidiebere
# Use a Python dictionary to construct the graph.
from __future__ import annotations
from pprint import pformat
from typing import Generic, TypeVar
_SCREAMING_SNAKE_CASE : List[str] = TypeVar("T")
class _snake_case ( Generic[T] ):
def __init__( self , a__ = True ) -> None:
'''simple docstring'''
snake_case_ = {} # dictionary of lists
snake_case_ = directed
def lowerCAmelCase__ ( self , a__ , a__ ) -> GraphAdjacencyList[T]:
'''simple docstring'''
if not self.directed: # For undirected graphs
# if both source vertex and destination vertex are both present in the
# adjacency list, add destination vertex to source vertex list of adjacent
# vertices and add source vertex to destination vertex list of adjacent
# vertices.
if source_vertex in self.adj_list and destination_vertex in self.adj_list:
self.adj_list[source_vertex].append(a__ )
self.adj_list[destination_vertex].append(a__ )
# if only source vertex is present in adjacency list, add destination vertex
# to source vertex list of adjacent vertices, then create a new vertex with
# destination vertex as key and assign a list containing the source vertex
# as it's first adjacent vertex.
elif source_vertex in self.adj_list:
self.adj_list[source_vertex].append(a__ )
snake_case_ = [source_vertex]
# if only destination vertex is present in adjacency list, add source vertex
# to destination vertex list of adjacent vertices, then create a new vertex
# with source vertex as key and assign a list containing the source vertex
# as it's first adjacent vertex.
elif destination_vertex in self.adj_list:
self.adj_list[destination_vertex].append(a__ )
snake_case_ = [destination_vertex]
# if both source vertex and destination vertex are not present in adjacency
# list, create a new vertex with source vertex as key and assign a list
# containing the destination vertex as it's first adjacent vertex also
# create a new vertex with destination vertex as key and assign a list
# containing the source vertex as it's first adjacent vertex.
else:
snake_case_ = [destination_vertex]
snake_case_ = [source_vertex]
else: # For directed graphs
# if both source vertex and destination vertex are present in adjacency
# list, add destination vertex to source vertex list of adjacent vertices.
if source_vertex in self.adj_list and destination_vertex in self.adj_list:
self.adj_list[source_vertex].append(a__ )
# if only source vertex is present in adjacency list, add destination
# vertex to source vertex list of adjacent vertices and create a new vertex
# with destination vertex as key, which has no adjacent vertex
elif source_vertex in self.adj_list:
self.adj_list[source_vertex].append(a__ )
snake_case_ = []
# if only destination vertex is present in adjacency list, create a new
# vertex with source vertex as key and assign a list containing destination
# vertex as first adjacent vertex
elif destination_vertex in self.adj_list:
snake_case_ = [destination_vertex]
# if both source vertex and destination vertex are not present in adjacency
# list, create a new vertex with source vertex as key and a list containing
# destination vertex as it's first adjacent vertex. Then create a new vertex
# with destination vertex as key, which has no adjacent vertex
else:
snake_case_ = [destination_vertex]
snake_case_ = []
return self
def __repr__( self ) -> str:
'''simple docstring'''
return pformat(self.adj_list )
| 85 |
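# A readable re-expression of the adjacency-list class above (the snippet's
# `_snake_case` class and `lowerCAmelCase__` method correspond to
# `GraphAdjacencyList.add_edge` before obfuscation), plus a short usage example.
from __future__ import annotations

from pprint import pformat
from typing import Generic, TypeVar

T = TypeVar("T")


class GraphAdjacencyList(Generic[T]):
    def __init__(self, directed: bool = True) -> None:
        self.adj_list: dict[T, list[T]] = {}
        self.directed = directed

    def add_edge(self, source: T, dest: T) -> "GraphAdjacencyList[T]":
        # setdefault collapses the four presence/absence branches spelled out above
        self.adj_list.setdefault(source, []).append(dest)
        self.adj_list.setdefault(dest, [])
        if not self.directed:
            self.adj_list[dest].append(source)
        return self

    def __repr__(self) -> str:
        return pformat(self.adj_list)


g = GraphAdjacencyList(directed=False)
g.add_edge(1, 2).add_edge(2, 3)
print(g)  # {1: [2], 2: [1, 3], 3: [2]}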
'''simple docstring'''
import re
from filelock import FileLock
try:
import nltk
__SCREAMING_SNAKE_CASE :Optional[int] = True
except (ImportError, ModuleNotFoundError):
__SCREAMING_SNAKE_CASE :str = False
if NLTK_AVAILABLE:
with FileLock('''.lock''') as lock:
nltk.download('''punkt''', quiet=True)
def UpperCAmelCase_ ( __lowercase : str ) -> str:
'''simple docstring'''
    __lowercase = re.sub("<n>" , "" , __lowercase ) # remove pegasus newline char
assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)"
return "\n".join(nltk.sent_tokenize(__lowercase ) )
| 22 | 0 |
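# A self-contained sketch of the sentence splitter above, with the re.sub
# result actually assigned (as fixed in the snippet): strip the pegasus "<n>"
# token, then put each nltk-detected sentence on its own line.
import re

import nltk

nltk.download("punkt", quiet=True)


def add_newline_to_each_sentence(text):
    text = re.sub("<n>", "", text)  # remove pegasus newline char
    return "\n".join(nltk.sent_tokenize(text))


print(add_newline_to_each_sentence("Pegasus is great.<n>It summarizes well."))
# Pegasus is great.
# It summarizes well.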
"""simple docstring"""
# Author: OMKAR PATHAK, Nwachukwu Chidiebere
# Use a Python dictionary to construct the graph.
from __future__ import annotations
from pprint import pformat
from typing import Generic, TypeVar
lowerCamelCase__ = TypeVar("""T""")
class A__ ( Generic[T]):
def __init__( self , _SCREAMING_SNAKE_CASE = True ):
__lowerCAmelCase : dict[T, list[T]] = {} # dictionary of lists
__lowerCAmelCase : Optional[int] = directed
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
if not self.directed: # For undirected graphs
# if both source vertex and destination vertex are both present in the
# adjacency list, add destination vertex to source vertex list of adjacent
# vertices and add source vertex to destination vertex list of adjacent
# vertices.
if source_vertex in self.adj_list and destination_vertex in self.adj_list:
self.adj_list[source_vertex].append(_SCREAMING_SNAKE_CASE )
self.adj_list[destination_vertex].append(_SCREAMING_SNAKE_CASE )
# if only source vertex is present in adjacency list, add destination vertex
# to source vertex list of adjacent vertices, then create a new vertex with
# destination vertex as key and assign a list containing the source vertex
# as it's first adjacent vertex.
elif source_vertex in self.adj_list:
self.adj_list[source_vertex].append(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : str = [source_vertex]
# if only destination vertex is present in adjacency list, add source vertex
# to destination vertex list of adjacent vertices, then create a new vertex
# with source vertex as key and assign a list containing the source vertex
# as it's first adjacent vertex.
elif destination_vertex in self.adj_list:
self.adj_list[destination_vertex].append(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Dict = [destination_vertex]
# if both source vertex and destination vertex are not present in adjacency
# list, create a new vertex with source vertex as key and assign a list
# containing the destination vertex as it's first adjacent vertex also
# create a new vertex with destination vertex as key and assign a list
# containing the source vertex as it's first adjacent vertex.
else:
__lowerCAmelCase : str = [destination_vertex]
__lowerCAmelCase : Optional[int] = [source_vertex]
else: # For directed graphs
# if both source vertex and destination vertex are present in adjacency
# list, add destination vertex to source vertex list of adjacent vertices.
if source_vertex in self.adj_list and destination_vertex in self.adj_list:
self.adj_list[source_vertex].append(_SCREAMING_SNAKE_CASE )
# if only source vertex is present in adjacency list, add destination
# vertex to source vertex list of adjacent vertices and create a new vertex
# with destination vertex as key, which has no adjacent vertex
elif source_vertex in self.adj_list:
self.adj_list[source_vertex].append(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Tuple = []
# if only destination vertex is present in adjacency list, create a new
# vertex with source vertex as key and assign a list containing destination
# vertex as first adjacent vertex
elif destination_vertex in self.adj_list:
__lowerCAmelCase : Any = [destination_vertex]
# if both source vertex and destination vertex are not present in adjacency
# list, create a new vertex with source vertex as key and a list containing
# destination vertex as it's first adjacent vertex. Then create a new vertex
# with destination vertex as key, which has no adjacent vertex
else:
__lowerCAmelCase : str = [destination_vertex]
__lowerCAmelCase : int = []
return self
def __repr__( self ):
return pformat(self.adj_list ) | 86 |
'''simple docstring'''
import inspect
import unittest
import numpy as np
from tests.test_modeling_common import floats_tensor
from transformers import DetrConfig, MaskFormerConfig, SwinConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaskFormerForInstanceSegmentation, MaskFormerModel
if is_vision_available():
from transformers import MaskFormerImageProcessor
if is_vision_available():
from PIL import Image
class A_ :
def __init__( self : str , snake_case_ : int , snake_case_ : Union[str, Any]=2 , snake_case_ : List[Any]=True , snake_case_ : str=False , snake_case_ : str=1_0 , snake_case_ : str=3 , snake_case_ : Dict=3_2 * 4 , snake_case_ : Any=3_2 * 6 , snake_case_ : Optional[Any]=4 , snake_case_ : Optional[int]=3_2 , ):
_UpperCAmelCase = parent
_UpperCAmelCase = batch_size
_UpperCAmelCase = is_training
_UpperCAmelCase = use_auxiliary_loss
_UpperCAmelCase = num_queries
_UpperCAmelCase = num_channels
_UpperCAmelCase = min_size
_UpperCAmelCase = max_size
_UpperCAmelCase = num_labels
_UpperCAmelCase = mask_feature_size
def lowercase ( self : Union[str, Any] ):
_UpperCAmelCase = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size] ).to(
snake_case_ )
_UpperCAmelCase = torch.ones([self.batch_size, self.min_size, self.max_size] , device=snake_case_ )
_UpperCAmelCase = (
torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size] , device=snake_case_ ) > 0.5
).float()
_UpperCAmelCase = (torch.rand((self.batch_size, self.num_labels) , device=snake_case_ ) > 0.5).long()
_UpperCAmelCase = self.get_config()
return config, pixel_values, pixel_mask, mask_labels, class_labels
def lowercase ( self : List[Any] ):
return MaskFormerConfig.from_backbone_and_decoder_configs(
backbone_config=SwinConfig(
depths=[1, 1, 1, 1] , ) , decoder_config=DetrConfig(
decoder_ffn_dim=1_2_8 , num_queries=self.num_queries , decoder_attention_heads=2 , d_model=self.mask_feature_size , ) , mask_feature_size=self.mask_feature_size , fpn_feature_size=self.mask_feature_size , num_channels=self.num_channels , num_labels=self.num_labels , )
def lowercase ( self : Optional[Any] ):
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = self.prepare_config_and_inputs()
_UpperCAmelCase = {"pixel_values": pixel_values, "pixel_mask": pixel_mask}
return config, inputs_dict
def lowercase ( self : List[Any] , snake_case_ : Optional[Any] , snake_case_ : Optional[Any] ):
_UpperCAmelCase = output.encoder_hidden_states
_UpperCAmelCase = output.pixel_decoder_hidden_states
_UpperCAmelCase = output.transformer_decoder_hidden_states
self.parent.assertTrue(len(snake_case_ ) , len(config.backbone_config.depths ) )
self.parent.assertTrue(len(snake_case_ ) , len(config.backbone_config.depths ) )
self.parent.assertTrue(len(snake_case_ ) , config.decoder_config.decoder_layers )
def lowercase ( self : Tuple , snake_case_ : str , snake_case_ : Optional[int] , snake_case_ : Any , snake_case_ : Optional[Any]=False ):
with torch.no_grad():
_UpperCAmelCase = MaskFormerModel(config=snake_case_ )
model.to(snake_case_ )
model.eval()
_UpperCAmelCase = model(pixel_values=snake_case_ , pixel_mask=snake_case_ )
_UpperCAmelCase = model(snake_case_ , output_hidden_states=snake_case_ )
        # the correct shape of output.transformer_decoder_hidden_states ensures the correctness of the
        # encoder and pixel decoder
self.parent.assertEqual(
output.transformer_decoder_last_hidden_state.shape , (self.batch_size, self.num_queries, self.mask_feature_size) , )
        # let's ensure the other two hidden states exist
self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(output.encoder_last_hidden_state is not None )
if output_hidden_states:
self.check_output_hidden_state(snake_case_ , snake_case_ )
def lowercase ( self : Any , snake_case_ : List[str] , snake_case_ : List[Any] , snake_case_ : int , snake_case_ : str , snake_case_ : List[Any] ):
_UpperCAmelCase = MaskFormerForInstanceSegmentation(config=snake_case_ )
model.to(snake_case_ )
model.eval()
def comm_check_on_output(snake_case_ : int ):
# let's still check that all the required stuff is there
self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.encoder_last_hidden_state is not None )
# okay, now we need to check the logits shape
# due to the encoder compression, masks have a //4 spatial size
self.parent.assertEqual(
result.masks_queries_logits.shape , (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) , )
# + 1 for null class
self.parent.assertEqual(
result.class_queries_logits.shape , (self.batch_size, self.num_queries, self.num_labels + 1) )
with torch.no_grad():
_UpperCAmelCase = model(pixel_values=snake_case_ , pixel_mask=snake_case_ )
_UpperCAmelCase = model(snake_case_ )
comm_check_on_output(snake_case_ )
_UpperCAmelCase = model(
pixel_values=snake_case_ , pixel_mask=snake_case_ , mask_labels=snake_case_ , class_labels=snake_case_ )
comm_check_on_output(snake_case_ )
self.parent.assertTrue(result.loss is not None )
self.parent.assertEqual(result.loss.shape , torch.Size([1] ) )
@require_torch
class A_ ( lowerCAmelCase_ , lowerCAmelCase_ , unittest.TestCase ):
_lowerCamelCase : Dict = (MaskFormerModel, MaskFormerForInstanceSegmentation) if is_torch_available() else ()
_lowerCamelCase : Tuple = (
{"""feature-extraction""": MaskFormerModel, """image-segmentation""": MaskFormerForInstanceSegmentation}
if is_torch_available()
else {}
)
_lowerCamelCase : Optional[Any] = False
_lowerCamelCase : Dict = False
_lowerCamelCase : Any = False
_lowerCamelCase : List[Any] = False
def lowercase ( self : Optional[int] ):
_UpperCAmelCase = MaskFormerModelTester(self )
_UpperCAmelCase = ConfigTester(self , config_class=snake_case_ , has_text_modality=snake_case_ )
def lowercase ( self : Optional[Any] ):
self.config_tester.run_common_tests()
def lowercase ( self : Union[str, Any] ):
_UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskformer_model(snake_case_ , **snake_case_ , output_hidden_states=snake_case_ )
def lowercase ( self : int ):
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_maskformer_instance_segmentation_head_model(*snake_case_ )
@unittest.skip(reason="MaskFormer does not use inputs_embeds" )
def lowercase ( self : Any ):
pass
@unittest.skip(reason="MaskFormer does not have a get_input_embeddings method" )
def lowercase ( self : List[str] ):
pass
@unittest.skip(reason="MaskFormer is not a generative model" )
def lowercase ( self : List[str] ):
pass
@unittest.skip(reason="MaskFormer does not use token embeddings" )
def lowercase ( self : List[Any] ):
pass
@require_torch_multi_gpu
@unittest.skip(
reason="MaskFormer has some layers using `add_module` which doesn't work well with `nn.DataParallel`" )
def lowercase ( self : Any ):
pass
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
def lowercase ( self : Union[str, Any] ):
pass
def lowercase ( self : List[str] ):
_UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCAmelCase = model_class(snake_case_ )
_UpperCAmelCase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_UpperCAmelCase = [*signature.parameters.keys()]
_UpperCAmelCase = ["pixel_values"]
self.assertListEqual(arg_names[:1] , snake_case_ )
@slow
def lowercase ( self : Optional[int] ):
for model_name in ["facebook/maskformer-swin-small-coco"]:
_UpperCAmelCase = MaskFormerModel.from_pretrained(snake_case_ )
self.assertIsNotNone(snake_case_ )
def lowercase ( self : Optional[int] ):
_UpperCAmelCase = (self.model_tester.min_size,) * 2
_UpperCAmelCase = {
"pixel_values": torch.randn((2, 3, *size) , device=snake_case_ ),
"mask_labels": torch.randn((2, 1_0, *size) , device=snake_case_ ),
"class_labels": torch.zeros(2 , 1_0 , device=snake_case_ ).long(),
}
_UpperCAmelCase = MaskFormerForInstanceSegmentation(MaskFormerConfig() ).to(snake_case_ )
_UpperCAmelCase = model(**snake_case_ )
self.assertTrue(outputs.loss is not None )
def lowercase ( self : Dict ):
_UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskformer_model(snake_case_ , **snake_case_ , output_hidden_states=snake_case_ )
def lowercase ( self : Any ):
_UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCAmelCase = model_class(snake_case_ ).to(snake_case_ )
_UpperCAmelCase = model(**snake_case_ , output_attentions=snake_case_ )
self.assertTrue(outputs.attentions is not None )
def lowercase ( self : int ):
if not self.model_tester.is_training:
return
# only MaskFormerForInstanceSegmentation has the loss
_UpperCAmelCase = self.all_model_classes[1]
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
_UpperCAmelCase = model_class(snake_case_ )
model.to(snake_case_ )
model.train()
_UpperCAmelCase = model(snake_case_ , mask_labels=snake_case_ , class_labels=snake_case_ ).loss
loss.backward()
def lowercase ( self : int ):
# only MaskFormerForInstanceSegmentation has the loss
_UpperCAmelCase = self.all_model_classes[1]
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
_UpperCAmelCase = True
_UpperCAmelCase = True
_UpperCAmelCase = model_class(snake_case_ )
model.to(snake_case_ )
model.train()
_UpperCAmelCase = model(snake_case_ , mask_labels=snake_case_ , class_labels=snake_case_ )
_UpperCAmelCase = outputs.encoder_hidden_states[0]
encoder_hidden_states.retain_grad()
_UpperCAmelCase = outputs.pixel_decoder_hidden_states[0]
pixel_decoder_hidden_states.retain_grad()
        # we set requires_grad=True on inputs_embeds (line 2152); the original implementation doesn't
_UpperCAmelCase = outputs.transformer_decoder_hidden_states[0]
transformer_decoder_hidden_states.retain_grad()
_UpperCAmelCase = outputs.attentions[0]
attentions.retain_grad()
outputs.loss.backward(retain_graph=snake_case_ )
self.assertIsNotNone(encoder_hidden_states.grad )
self.assertIsNotNone(pixel_decoder_hidden_states.grad )
self.assertIsNotNone(transformer_decoder_hidden_states.grad )
self.assertIsNotNone(attentions.grad )
__SCREAMING_SNAKE_CASE :Dict = 1e-4
def UpperCAmelCase_ ( ) -> List[str]:
'''simple docstring'''
_UpperCAmelCase = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_vision
@slow
class A_ ( unittest.TestCase ):
@cached_property
def lowercase ( self : Dict ):
return (
MaskFormerImageProcessor.from_pretrained("facebook/maskformer-swin-small-coco" )
if is_vision_available()
else None
)
def lowercase ( self : List[Any] ):
_UpperCAmelCase = MaskFormerModel.from_pretrained("facebook/maskformer-swin-small-coco" ).to(snake_case_ )
_UpperCAmelCase = self.default_image_processor
_UpperCAmelCase = prepare_img()
_UpperCAmelCase = image_processor(snake_case_ , return_tensors="pt" ).to(snake_case_ )
_UpperCAmelCase = inputs["pixel_values"].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 3_2) == 0 and (inputs_shape[-2] % 3_2) == 0 )
# check size
self.assertEqual(snake_case_ , (1, 3, 8_0_0, 1_0_8_8) )
with torch.no_grad():
_UpperCAmelCase = model(**snake_case_ )
_UpperCAmelCase = torch.tensor(
[[-0.0_4_8_2, 0.9_2_2_8, 0.4_9_5_1], [-0.2_5_4_7, 0.8_0_1_7, 0.8_5_2_7], [-0.0_0_6_9, 0.3_3_8_5, -0.0_0_8_9]] ).to(snake_case_ )
self.assertTrue(
torch.allclose(
outputs.encoder_last_hidden_state[0, 0, :3, :3] , snake_case_ , atol=snake_case_ ) )
_UpperCAmelCase = torch.tensor(
[[-0.8_4_2_2, -0.8_4_3_4, -0.9_7_1_8], [-1.0_1_4_4, -0.5_5_6_5, -0.4_1_9_5], [-1.0_0_3_8, -0.4_4_8_4, -0.1_9_6_1]] ).to(snake_case_ )
self.assertTrue(
torch.allclose(
outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] , snake_case_ , atol=snake_case_ ) )
_UpperCAmelCase = torch.tensor(
[[0.2_8_5_2, -0.0_1_5_9, 0.9_7_3_5], [0.6_2_5_4, 0.1_8_5_8, 0.8_5_2_9], [-0.0_6_8_0, -0.4_1_1_6, 1.8_4_1_3]] ).to(snake_case_ )
self.assertTrue(
torch.allclose(
outputs.transformer_decoder_last_hidden_state[0, :3, :3] , snake_case_ , atol=snake_case_ ) )
def lowercase ( self : Tuple ):
_UpperCAmelCase = (
MaskFormerForInstanceSegmentation.from_pretrained("facebook/maskformer-swin-small-coco" )
.to(snake_case_ )
.eval()
)
_UpperCAmelCase = self.default_image_processor
_UpperCAmelCase = prepare_img()
_UpperCAmelCase = image_processor(snake_case_ , return_tensors="pt" ).to(snake_case_ )
_UpperCAmelCase = inputs["pixel_values"].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 3_2) == 0 and (inputs_shape[-2] % 3_2) == 0 )
# check size
self.assertEqual(snake_case_ , (1, 3, 8_0_0, 1_0_8_8) )
with torch.no_grad():
_UpperCAmelCase = model(**snake_case_ )
# masks_queries_logits
_UpperCAmelCase = outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape , (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , )
_UpperCAmelCase = [
[-1.3_7_3_7_1_2_4, -1.7_7_2_4_9_3_7, -1.9_3_6_4_2_3_3],
[-1.5_9_7_7_2_8_1, -1.9_8_6_7_9_3_9, -2.1_5_2_3_6_9_5],
[-1.5_7_9_5_3_9_8, -1.9_2_6_9_8_3_2, -2.0_9_3_9_4_2],
]
_UpperCAmelCase = torch.tensor(snake_case_ ).to(snake_case_ )
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , snake_case_ , atol=snake_case_ ) )
# class_queries_logits
_UpperCAmelCase = outputs.class_queries_logits
self.assertEqual(
class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1) )
_UpperCAmelCase = torch.tensor(
[
[1.6_512e00, -5.2_572e00, -3.3_519e00],
[3.6_169e-02, -5.9_025e00, -2.9_313e00],
[1.0_766e-04, -7.7_630e00, -5.1_263e00],
] ).to(snake_case_ )
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , snake_case_ , atol=snake_case_ ) )
    def test_inference_instance_segmentation_head_resnet_backbone(self):
_UpperCAmelCase = (
MaskFormerForInstanceSegmentation.from_pretrained("facebook/maskformer-resnet101-coco-stuff" )
.to(snake_case_ )
.eval()
)
_UpperCAmelCase = self.default_image_processor
_UpperCAmelCase = prepare_img()
_UpperCAmelCase = image_processor(snake_case_ , return_tensors="pt" ).to(snake_case_ )
_UpperCAmelCase = inputs["pixel_values"].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 3_2) == 0 and (inputs_shape[-2] % 3_2) == 0 )
# check size
self.assertEqual(snake_case_ , (1, 3, 8_0_0, 1_0_8_8) )
with torch.no_grad():
_UpperCAmelCase = model(**snake_case_ )
# masks_queries_logits
_UpperCAmelCase = outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape , (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , )
_UpperCAmelCase = [[-0.9_0_4_6, -2.6_3_6_6, -4.6_0_6_2], [-3.4_1_7_9, -5.7_8_9_0, -8.8_0_5_7], [-4.9_1_7_9, -7.6_5_6_0, -1_0.7_7_1_1]]
_UpperCAmelCase = torch.tensor(snake_case_ ).to(snake_case_ )
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , snake_case_ , atol=snake_case_ ) )
# class_queries_logits
_UpperCAmelCase = outputs.class_queries_logits
self.assertEqual(
class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1) )
_UpperCAmelCase = torch.tensor(
[[4.7_1_8_8, -3.2_5_8_5, -2.8_8_5_7], [6.6_8_7_1, -2.9_1_8_1, -1.2_4_8_7], [7.2_4_4_9, -2.2_7_6_4, -2.1_8_7_4]] ).to(snake_case_ )
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , snake_case_ , atol=snake_case_ ) )
    def test_with_segmentation_maps_and_loss(self):
_UpperCAmelCase = (
MaskFormerForInstanceSegmentation.from_pretrained("facebook/maskformer-swin-small-coco" )
.to(snake_case_ )
.eval()
)
_UpperCAmelCase = self.default_image_processor
_UpperCAmelCase = image_processor(
[np.zeros((3, 8_0_0, 1_3_3_3) ), np.zeros((3, 8_0_0, 1_3_3_3) )] , segmentation_maps=[np.zeros((3_8_4, 3_8_4) ).astype(np.floataa ), np.zeros((3_8_4, 3_8_4) ).astype(np.floataa )] , return_tensors="pt" , )
_UpperCAmelCase = inputs["pixel_values"].to(snake_case_ )
_UpperCAmelCase = [el.to(snake_case_ ) for el in inputs["mask_labels"]]
_UpperCAmelCase = [el.to(snake_case_ ) for el in inputs["class_labels"]]
with torch.no_grad():
_UpperCAmelCase = model(**snake_case_ )
self.assertTrue(outputs.loss is not None )
| 22 | 0 |
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers import is_speech_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_speech_available():
from transformers import WhisperFeatureExtractor
if is_torch_available():
import torch
global_rng = random.Random()


def floats_list(shape, scale=1.0, rng=None, name=None):
    """Creates a random float32 tensor of the given shape, as a nested Python list."""
    if rng is None:
        rng = global_rng

    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)

    return values
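# Quick sanity check (added for illustration; not part of the original test
# utilities): `floats_list((2, 3))` yields a 2 x 3 nested list with values in
# [0, scale); seeding the rng makes the draw reproducible.
if __name__ == "__main__":
    _demo = floats_list((2, 3), scale=2.0, rng=random.Random(0))
    assert len(_demo) == 2 and len(_demo[0]) == 3
    assert all(0.0 <= v < 2.0 for row in _demo for v in row)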
@require_torch
@require_torchaudio
class WhisperFeatureExtractionTester(unittest.TestCase):
    def __init__(
        self, parent, batch_size=7, min_seq_length=400, max_seq_length=2000, feature_size=10, hop_length=160,
        chunk_length=8, padding_value=0.0, sampling_rate=4000, return_attention_mask=False, do_normalize=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.return_attention_mask = return_attention_mask
        self.do_normalize = do_normalize
        self.feature_size = feature_size
        self.chunk_length = chunk_length
        self.hop_length = hop_length
    def prepare_feat_extract_dict(self):
return {
"feature_size": self.feature_size,
"hop_length": self.hop_length,
"chunk_length": self.chunk_length,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))

        if equal_length:
            speech_inputs = [floats_list((self.max_seq_length, self.feature_size)) for _ in range(self.batch_size)]
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                floats_list((x, self.feature_size))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]
        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]
        return speech_inputs
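    # Illustration (added; not in the original file): with the defaults above the
    # variable-length branch yields batch_size=7 inputs whose lengths step from
    # min to max in increments of seq_length_diff=266 samples, i.e.
    # 400, 666, 932, 1198, 1464, 1730 and 1996.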
@require_torch
@require_torchaudio
class WhisperFeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
    feature_extraction_class = WhisperFeatureExtractor if is_speech_available() else None

    def setUp(self):
        self.feat_extract_tester = WhisperFeatureExtractionTester(self)
    def test_feat_extract_from_and_save_pretrained(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            saved_file = feat_extract_first.save_pretrained(tmpdirname)[0]
            check_json_file_has_correct_format(saved_file)
            feat_extract_second = self.feature_extraction_class.from_pretrained(tmpdirname)

        dict_first = feat_extract_first.to_dict()
        dict_second = feat_extract_second.to_dict()
        mel_1 = feat_extract_first.mel_filters
        mel_2 = feat_extract_second.mel_filters
        self.assertTrue(np.allclose(mel_1, mel_2))
        self.assertEqual(dict_first, dict_second)
    def test_feat_extract_to_json_file(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname, "feat_extract.json")
            feat_extract_first.to_json_file(json_file_path)
            feat_extract_second = self.feature_extraction_class.from_json_file(json_file_path)

        dict_first = feat_extract_first.to_dict()
        dict_second = feat_extract_second.to_dict()
        mel_1 = feat_extract_first.mel_filters
        mel_2 = feat_extract_second.mel_filters
        self.assertTrue(np.allclose(mel_1, mel_2))
        self.assertEqual(dict_first, dict_second)
    def test_call(self):
        # Tests that all call wrap to encode_plus and batch_encode_plus
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        # Test feature size
        input_features = feature_extractor(np_speech_inputs, padding="max_length", return_tensors="np").input_features
        self.assertTrue(input_features.ndim == 3)
        self.assertTrue(input_features.shape[-1] == feature_extractor.nb_max_frames)
        self.assertTrue(input_features.shape[-2] == feature_extractor.feature_size)

        # Test not batched input
        encoded_sequences_1 = feature_extractor(speech_inputs[0], return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs[0], return_tensors="np").input_features
        self.assertTrue(np.allclose(encoded_sequences_1, encoded_sequences_2, atol=1e-3))

        # Test batched
        encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs, return_tensors="np").input_features
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x))[0] for x in (800, 800, 800)]
        np_speech_inputs = np.asarray(speech_inputs)
        encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs, return_tensors="np").input_features
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

        # Test truncation required
        speech_inputs = [floats_list((1, x))[0] for x in range(200, (feature_extractor.n_samples + 500), 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        speech_inputs_truncated = [x[: feature_extractor.n_samples] for x in speech_inputs]
        np_speech_inputs_truncated = [np.asarray(speech_input) for speech_input in speech_inputs_truncated]

        input_features = feature_extractor(np_speech_inputs, return_tensors="np").input_features
        input_features_truncated = feature_extractor(np_speech_inputs_truncated, return_tensors="np").input_features
        for enc_seq_1, enc_seq_2 in zip(input_features, input_features_truncated):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))
    def test_double_precision_pad(self):
        import torch

        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        np_speech_inputs = np.random.rand(100, 32).astype(np.float64)
        py_speech_inputs = np_speech_inputs.tolist()

        for inputs in [py_speech_inputs, np_speech_inputs]:
            np_processed = feature_extractor.pad([{"input_features": inputs}], return_tensors="np")
            self.assertTrue(np_processed.input_features.dtype == np.float32)
            pt_processed = feature_extractor.pad([{"input_features": inputs}], return_tensors="pt")
            self.assertTrue(pt_processed.input_features.dtype == torch.float32)
    def _load_datasamples(self, num_samples):
        ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        # automatic decoding with librispeech
        speech_samples = ds.sort("id").select(range(num_samples))[:num_samples]["audio"]

        return [x["array"] for x in speech_samples]
    def test_integration(self):
        # fmt: off
        EXPECTED_INPUT_FEATURES = torch.tensor(
            [
                0.1193, -0.0946, -0.1098, -0.0196, 0.0225, -0.0690, -0.1736, 0.0951,
                0.0971, -0.0817, -0.0702, 0.0162, 0.0260, 0.0017, -0.0192, -0.1678,
                0.0709, -0.1867, -0.0655, -0.0274, -0.0234, -0.1884, -0.0516, -0.0554,
                -0.0274, -0.1425, -0.1423, 0.0837, 0.0377, -0.0854
            ]
        )
        # fmt: on
        input_speech = self._load_datasamples(1)
        feature_extractor = WhisperFeatureExtractor()
        input_features = feature_extractor(input_speech, return_tensors="pt").input_features
        self.assertEqual(input_features.shape, (1, 80, 3000))
        self.assertTrue(torch.allclose(input_features[0, 0, :30], EXPECTED_INPUT_FEATURES, atol=1e-4))
    def test_zero_mean_unit_variance_normalization_trunc_np_longest(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        audio = self._load_datasamples(1)[0]
        audio = ((audio - audio.min()) / (audio.max() - audio.min())) * 65535  # Rescale to [0, 65535] to show issue
        audio = feat_extract.zero_mean_unit_var_norm([audio], attention_mask=None)[0]

        self.assertTrue(np.all(np.mean(audio) < 1e-3))
        self.assertTrue(np.all(np.abs(np.var(audio) - 1) < 1e-3))
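    # Minimal usage sketch (added for illustration; not part of the original test
    # suite): with the default config the extractor maps raw 16 kHz mono audio to
    # a log-mel spectrogram padded/truncated to 30 s (80 mel bins x 3000 frames).
    def _whisper_feature_extractor_demo(self):
        feature_extractor = WhisperFeatureExtractor()
        audio = np.zeros(16_000)  # 1 second of silence at 16 kHz
        features = feature_extractor(audio, sampling_rate=16_000, return_tensors="np").input_features
        assert features.shape == (1, 80, 3000)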
| 87 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_albert import AlbertTokenizer
else:
    AlbertTokenizer = None
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''albert-base-v1''': '''https://huggingface.co/albert-base-v1/resolve/main/spiece.model''',
'''albert-large-v1''': '''https://huggingface.co/albert-large-v1/resolve/main/spiece.model''',
'''albert-xlarge-v1''': '''https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model''',
'''albert-xxlarge-v1''': '''https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model''',
'''albert-base-v2''': '''https://huggingface.co/albert-base-v2/resolve/main/spiece.model''',
'''albert-large-v2''': '''https://huggingface.co/albert-large-v2/resolve/main/spiece.model''',
'''albert-xlarge-v2''': '''https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model''',
'''albert-xxlarge-v2''': '''https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model''',
},
'''tokenizer_file''': {
'''albert-base-v1''': '''https://huggingface.co/albert-base-v1/resolve/main/tokenizer.json''',
'''albert-large-v1''': '''https://huggingface.co/albert-large-v1/resolve/main/tokenizer.json''',
'''albert-xlarge-v1''': '''https://huggingface.co/albert-xlarge-v1/resolve/main/tokenizer.json''',
'''albert-xxlarge-v1''': '''https://huggingface.co/albert-xxlarge-v1/resolve/main/tokenizer.json''',
'''albert-base-v2''': '''https://huggingface.co/albert-base-v2/resolve/main/tokenizer.json''',
'''albert-large-v2''': '''https://huggingface.co/albert-large-v2/resolve/main/tokenizer.json''',
'''albert-xlarge-v2''': '''https://huggingface.co/albert-xlarge-v2/resolve/main/tokenizer.json''',
'''albert-xxlarge-v2''': '''https://huggingface.co/albert-xxlarge-v2/resolve/main/tokenizer.json''',
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''albert-base-v1''': 512,
'''albert-large-v1''': 512,
'''albert-xlarge-v1''': 512,
'''albert-xxlarge-v1''': 512,
'''albert-base-v2''': 512,
'''albert-large-v2''': 512,
'''albert-xlarge-v2''': 512,
'''albert-xxlarge-v2''': 512,
}
SPIECE_UNDERLINE = "▁"
class AlbertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = AlbertTokenizer
    def __init__(
        self, vocab_file=None, tokenizer_file=None, do_lower_case=True, remove_space=True, keep_accents=False,
        bos_token="[CLS]", eos_token="[SEP]", unk_token="<unk>", sep_token="[SEP]", pad_token="<pad>",
        cls_token="[CLS]", mask_token="[MASK]", **kwargs,
    ):
        # Mask token behave like a normal word, i.e. include the space before it and
        # is included in the raw text, there should be a match in a non-normalized sentence.
        mask_token = (
            AddedToken(mask_token, lstrip=True, rstrip=False, normalized=False)
            if isinstance(mask_token, str)
            else mask_token
        )

        super().__init__(
            vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, remove_space=remove_space,
            keep_accents=keep_accents, bos_token=bos_token, eos_token=eos_token, unk_token=unk_token,
            sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, **kwargs,
        )

        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep
    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
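    # Illustration (added; ids are schematic): for a pair (A, B) the two helpers
    # above produce
    #     tokens:   [CLS] A ... A [SEP] B ... B [SEP]
    #     type ids:   0   0 ... 0   0   1 ... 1   1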
    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
| 22 | 0 |
import math
import flax.linen as nn
import jax.numpy as jnp
def get_sinusoidal_embeddings(
    timesteps, embedding_dim, freq_shift=1, min_timescale=1, max_timescale=1.0e4, flip_sin_to_cos=False, scale=1.0,
):
    """Returns sinusoidal positional embeddings for a 1-d array of timesteps."""
    assert timesteps.ndim == 1, "Timesteps should be a 1d-array"
    assert embedding_dim % 2 == 0, f"Embedding dimension {embedding_dim} should be even"
    num_timescales = float(embedding_dim // 2)
    log_timescale_increment = math.log(max_timescale / min_timescale) / (num_timescales - freq_shift)
    inv_timescales = min_timescale * jnp.exp(jnp.arange(num_timescales, dtype=jnp.float32) * -log_timescale_increment)
    emb = jnp.expand_dims(timesteps, 1) * jnp.expand_dims(inv_timescales, 0)

    # scale embeddings
    scaled_time = scale * emb

    if flip_sin_to_cos:
        signal = jnp.concatenate([jnp.cos(scaled_time), jnp.sin(scaled_time)], axis=1)
    else:
        signal = jnp.concatenate([jnp.sin(scaled_time), jnp.cos(scaled_time)], axis=1)
    signal = jnp.reshape(signal, [jnp.shape(timesteps)[0], embedding_dim])
    return signal
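# Worked example (added for illustration): with embedding_dim=4 there are two
# timescales, so each row is [sin(t*w0), sin(t*w1), cos(t*w0), cos(t*w1)] in the
# default (sin-first) layout; t=0 therefore maps to [0, 0, 1, 1].
def _sinusoidal_demo():
    timesteps = jnp.array([0.0, 1.0])
    emb = get_sinusoidal_embeddings(timesteps, embedding_dim=4)
    assert emb.shape == (2, 4)
    assert jnp.allclose(emb[0], jnp.array([0.0, 0.0, 1.0, 1.0]))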
class FlaxTimestepEmbedding(nn.Module):
    r"""Projects sinusoidal time-step features through a two-layer MLP (Dense -> SiLU -> Dense)."""

    time_embed_dim: int = 32
    dtype: jnp.dtype = jnp.float32

    @nn.compact
    def __call__(self, temb):
        temb = nn.Dense(self.time_embed_dim, dtype=self.dtype, name="linear_1")(temb)
        temb = nn.silu(temb)
        temb = nn.Dense(self.time_embed_dim, dtype=self.dtype, name="linear_2")(temb)
        return temb
class FlaxTimesteps(nn.Module):
    r"""Wraps `get_sinusoidal_embeddings` as a Flax module."""

    dim: int = 32
    flip_sin_to_cos: bool = False
    freq_shift: float = 1

    @nn.compact
    def __call__(self, timesteps):
        return get_sinusoidal_embeddings(
            timesteps, embedding_dim=self.dim, flip_sin_to_cos=self.flip_sin_to_cos, freq_shift=self.freq_shift
        )
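# Usage sketch (added for illustration): embed a batch of timesteps with the
# sinusoidal projection, then run it through the learned MLP embedding.
def _timestep_embedding_demo():
    import jax

    timesteps = jnp.arange(4, dtype=jnp.float32)
    proj = get_sinusoidal_embeddings(timesteps, embedding_dim=32)  # (4, 32)
    mlp = FlaxTimestepEmbedding(time_embed_dim=64)
    params = mlp.init(jax.random.PRNGKey(0), proj)
    temb = mlp.apply(params, proj)  # (4, 64)
    assert temb.shape == (4, 64)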
| 88 |
'''simple docstring'''
from collections import OrderedDict
from typing import Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...feature_extraction_utils import FeatureExtractionMixin
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)
PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''deepmind/language-perceiver''': '''https://huggingface.co/deepmind/language-perceiver/resolve/main/config.json''',
# See all Perceiver models at https://huggingface.co/models?filter=perceiver
}
class PerceiverConfig(PretrainedConfig):
    model_type = "perceiver"
    def __init__(
        self, num_latents=256, d_latents=1280, d_model=768, num_blocks=1, num_self_attends_per_block=26,
        num_self_attention_heads=8, num_cross_attention_heads=8, qk_channels=None, v_channels=None,
        cross_attention_shape_for_attention="kv", self_attention_widening_factor=1,
        cross_attention_widening_factor=1, hidden_act="gelu", attention_probs_dropout_prob=0.1,
        initializer_range=0.02, layer_norm_eps=1e-12, use_query_residual=True, vocab_size=262,
        max_position_embeddings=2048, image_size=56, train_size=[368, 496], num_frames=16,
        audio_samples_per_frame=1920, samples_per_patch=16, output_shape=[1, 16, 224, 224], **kwargs,
    ):
        super().__init__(**kwargs)

        self.num_latents = num_latents
        self.d_latents = d_latents
        self.d_model = d_model
        self.num_blocks = num_blocks
        self.num_self_attends_per_block = num_self_attends_per_block
        self.num_self_attention_heads = num_self_attention_heads
        self.num_cross_attention_heads = num_cross_attention_heads
        self.qk_channels = qk_channels
        self.v_channels = v_channels
        self.cross_attention_shape_for_attention = cross_attention_shape_for_attention
        self.self_attention_widening_factor = self_attention_widening_factor
        self.cross_attention_widening_factor = cross_attention_widening_factor
        self.hidden_act = hidden_act
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_query_residual = use_query_residual
        # masked language modeling attributes
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        # image classification attributes
        self.image_size = image_size
        # flow attributes
        self.train_size = train_size
        # multimodal autoencoding attributes
        self.num_frames = num_frames
        self.audio_samples_per_frame = audio_samples_per_frame
        self.samples_per_patch = samples_per_patch
        self.output_shape = output_shape
class PerceiverOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("inputs", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4

    def generate_dummy_inputs(
        self,
        preprocessor: Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"],
        batch_size: int = -1,
        seq_length: int = -1,
        num_choices: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
        num_channels: int = 3,
        image_width: int = 40,
        image_height: int = 40,
    ) -> Mapping[str, Any]:
        # copied from `transformers.onnx.config.OnnxConfig` and slightly altered/simplified
        if isinstance(preprocessor, PreTrainedTokenizerBase):
            # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
            batch_size = compute_effective_axis_dimension(
                batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0
            )
            # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
            token_to_add = preprocessor.num_special_tokens_to_add(is_pair)
            seq_length = compute_effective_axis_dimension(
                seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add
            )
            # Generate dummy inputs according to compute batch and sequence
            dummy_input = [" ".join(["a"]) * seq_length] * batch_size
            inputs = dict(preprocessor(dummy_input, return_tensors=framework))
            inputs["inputs"] = inputs.pop("input_ids")
            return inputs
        elif isinstance(preprocessor, FeatureExtractionMixin) and preprocessor.model_input_names[0] == "pixel_values":
            # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
            batch_size = compute_effective_axis_dimension(batch_size, fixed_dimension=OnnxConfig.default_fixed_batch)
            dummy_input = self._generate_dummy_images(batch_size, num_channels, image_height, image_width)
            inputs = dict(preprocessor(images=dummy_input, return_tensors=framework))
            inputs["inputs"] = inputs.pop("pixel_values")
            return inputs
        else:
            raise ValueError(
                "Unable to generate dummy inputs for the model. Please provide a tokenizer or a preprocessor."
            )
| 22 | 0 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)
ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''andreasmadsen/efficient_mlm_m0.40''': (
'''https://huggingface.co/andreasmadsen/efficient_mlm_m0.40/resolve/main/config.json'''
),
}
class RobertaPreLayerNormConfig(PretrainedConfig):
    model_type = "roberta-prelayernorm"
    def __init__(
        self, vocab_size=50265, hidden_size=768, num_hidden_layers=12, num_attention_heads=12,
        intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1,
        max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12,
        pad_token_id=1, bos_token_id=0, eos_token_id=2, position_embedding_type="absolute",
        use_cache=True, classifier_dropout=None, **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class RobertaPreLayerNormOnnxConfig(OnnxConfig):
    @property
    def inputs(self):
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
| 89 |
'''simple docstring'''
import warnings
from ...utils import is_sklearn_available, requires_backends
if is_sklearn_available():
from scipy.stats import pearsonr, spearmanr
    from sklearn.metrics import f1_score, matthews_corrcoef
DEPRECATION_WARNING = (
'''This metric will be removed from the library soon, metrics should be handled with the 🤗 Evaluate '''
'''library. You can have a look at this example script for pointers: '''
'''https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py'''
)
def simple_accuracy(preds, labels):
    warnings.warn(DEPRECATION_WARNING, FutureWarning)
    requires_backends(simple_accuracy, "sklearn")
    return (preds == labels).mean()


def acc_and_f1(preds, labels):
    warnings.warn(DEPRECATION_WARNING, FutureWarning)
    requires_backends(acc_and_f1, "sklearn")
    acc = simple_accuracy(preds, labels)
    f1 = f1_score(y_true=labels, y_pred=preds)
    return {
        "acc": acc,
        "f1": f1,
        "acc_and_f1": (acc + f1) / 2,
    }
def pearson_and_spearman(preds, labels):
    warnings.warn(DEPRECATION_WARNING, FutureWarning)
    requires_backends(pearson_and_spearman, "sklearn")
    pearson_corr = pearsonr(preds, labels)[0]
    spearman_corr = spearmanr(preds, labels)[0]
    return {
        "pearson": pearson_corr,
        "spearmanr": spearman_corr,
        "corr": (pearson_corr + spearman_corr) / 2,
    }
def glue_compute_metrics(task_name, preds, labels):
    warnings.warn(DEPRECATION_WARNING, FutureWarning)
    requires_backends(glue_compute_metrics, "sklearn")
    assert len(preds) == len(labels), f"Predictions and labels have mismatched lengths {len(preds)} and {len(labels)}"
    if task_name == "cola":
        return {"mcc": matthews_corrcoef(labels, preds)}
    elif task_name == "sst-2":
        return {"acc": simple_accuracy(preds, labels)}
    elif task_name == "mrpc":
        return acc_and_f1(preds, labels)
    elif task_name == "sts-b":
        return pearson_and_spearman(preds, labels)
    elif task_name == "qqp":
        return acc_and_f1(preds, labels)
    elif task_name == "mnli":
        return {"mnli/acc": simple_accuracy(preds, labels)}
    elif task_name == "mnli-mm":
        return {"mnli-mm/acc": simple_accuracy(preds, labels)}
    elif task_name == "qnli":
        return {"acc": simple_accuracy(preds, labels)}
    elif task_name == "rte":
        return {"acc": simple_accuracy(preds, labels)}
    elif task_name == "wnli":
        return {"acc": simple_accuracy(preds, labels)}
    elif task_name == "hans":
        return {"acc": simple_accuracy(preds, labels)}
    else:
        raise KeyError(task_name)
def xnli_compute_metrics(task_name, preds, labels):
    warnings.warn(DEPRECATION_WARNING, FutureWarning)
    requires_backends(xnli_compute_metrics, "sklearn")
    if len(preds) != len(labels):
        raise ValueError(f"Predictions and labels have mismatched lengths {len(preds)} and {len(labels)}")
    if task_name == "xnli":
        return {"acc": simple_accuracy(preds, labels)}
    else:
        raise KeyError(task_name)
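if __name__ == "__main__":
    # Tiny smoke test (added for illustration; requires scikit-learn): MRPC-style
    # accuracy/F1 on toy predictions. Expected: acc 0.75, f1 0.8.
    import numpy as np

    preds = np.array([1, 0, 1, 1])
    labels = np.array([1, 0, 0, 1])
    print(glue_compute_metrics("mrpc", preds, labels))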
| 22 | 0 |
import sys
from typing import Tuple
import numpy as np
import torch
from PIL import Image
from torch import nn
from transformers.image_utils import PILImageResampling
from utils import img_tensorize
class ResizeShortestEdge:
    def __init__(self, short_edge_length, max_size=sys.maxsize):
        """
        Args:
            short_edge_length (list[min, max])
            max_size (int): maximum allowed longest edge length.
        """
        self.interp_method = "bilinear"
        self.max_size = max_size
        self.short_edge_length = short_edge_length

    def __call__(self, imgs):
        img_augs = []
        for img in imgs:
            h, w = img.shape[:2]
            # later: provide list and randomly choose index for resize
            size = np.random.randint(self.short_edge_length[0], self.short_edge_length[1] + 1)
            if size == 0:
                return img
            scale = size * 1.0 / min(h, w)
            if h < w:
                newh, neww = size, scale * w
            else:
                newh, neww = scale * h, size
            if max(newh, neww) > self.max_size:
                scale = self.max_size * 1.0 / max(newh, neww)
                newh = newh * scale
                neww = neww * scale
            neww = int(neww + 0.5)
            newh = int(newh + 0.5)

            if img.dtype == np.uint8:
                pil_image = Image.fromarray(img)
                pil_image = pil_image.resize((neww, newh), PILImageResampling.BILINEAR)
                img = np.asarray(pil_image)
            else:
                img = img.permute(2, 0, 1).unsqueeze(0)  # 3, 0, 1) # hw(c) -> nchw
                img = nn.functional.interpolate(
                    img, (newh, neww), mode=self.interp_method, align_corners=False
                ).squeeze(0)
            img_augs.append(img)

        return img_augs
class Preprocess:
    def __init__(self, cfg):
        self.aug = ResizeShortestEdge([cfg.INPUT.MIN_SIZE_TEST, cfg.INPUT.MIN_SIZE_TEST], cfg.INPUT.MAX_SIZE_TEST)
        self.input_format = cfg.INPUT.FORMAT
        self.size_divisibility = cfg.SIZE_DIVISIBILITY
        self.pad_value = cfg.PAD_VALUE
        self.max_image_size = cfg.INPUT.MAX_SIZE_TEST
        self.device = cfg.MODEL.DEVICE
        self.pixel_std = torch.tensor(cfg.MODEL.PIXEL_STD).to(self.device).view(len(cfg.MODEL.PIXEL_STD), 1, 1)
        self.pixel_mean = torch.tensor(cfg.MODEL.PIXEL_MEAN).to(self.device).view(len(cfg.MODEL.PIXEL_STD), 1, 1)
        self.normalizer = lambda x: (x - self.pixel_mean) / self.pixel_std
    def pad(self, images):
        max_size = tuple(max(s) for s in zip(*[img.shape for img in images]))
        image_sizes = [im.shape[-2:] for im in images]
        images = [
            nn.functional.pad(
                im,
                [0, max_size[-1] - size[1], 0, max_size[-2] - size[0]],
                value=self.pad_value,
            )
            for size, im in zip(image_sizes, images)
        ]
        return torch.stack(images), torch.tensor(image_sizes)
    def __call__(self, images, single_image=False):
        with torch.no_grad():
            if not isinstance(images, list):
                images = [images]
            if single_image:
                assert len(images) == 1
            for i in range(len(images)):
                if isinstance(images[i], torch.Tensor):
                    images.insert(i, images.pop(i).to(self.device).float())
                elif not isinstance(images[i], torch.Tensor):
                    images.insert(
                        i,
                        torch.as_tensor(img_tensorize(images.pop(i), input_format=self.input_format))
                        .to(self.device)
                        .float(),
                    )
            # resize smallest edge
            raw_sizes = torch.tensor([im.shape[:2] for im in images])
            images = self.aug(images)
            # transpose images and convert to torch tensors
            # images = [torch.as_tensor(i.astype("float32")).permute(2, 0, 1).to(self.device) for i in images]
            # now normalize before pad to avoid useless arithmetic
            images = [self.normalizer(x) for x in images]
            # now pad them to do the following operations
            images, sizes = self.pad(images)
            # Normalize
            if self.size_divisibility > 0:
                raise NotImplementedError()
            # pad
            scales_yx = torch.true_divide(raw_sizes, sizes)
            if single_image:
                return images[0], sizes[0], scales_yx[0]
            else:
                return images, sizes, scales_yx
def _scale_box(boxes, scale_yx):
    boxes[:, 0::2] *= scale_yx[:, 1]
    boxes[:, 1::2] *= scale_yx[:, 0]
    return boxes


def _clip_box(tensor, box_size: Tuple[int, int]):
    assert torch.isfinite(tensor).all(), "Box tensor contains infinite or NaN!"
    h, w = box_size
    tensor[:, 0].clamp_(min=0, max=w)
    tensor[:, 1].clamp_(min=0, max=h)
    tensor[:, 2].clamp_(min=0, max=w)
    tensor[:, 3].clamp_(min=0, max=h)
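if __name__ == "__main__":
    # Tiny demonstration (added for illustration) of _clip_box: coordinates are
    # clamped in place to the (height, width) box size, x against width and y
    # against height.
    demo_boxes = torch.tensor([[-5.0, 10.0, 900.0, 300.0]])
    _clip_box(demo_boxes, (480, 640))  # (h, w)
    assert demo_boxes.tolist() == [[0.0, 10.0, 640.0, 300.0]]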
| 90 |
'''simple docstring'''
import argparse
from transformers import (
TapasConfig,
TapasForMaskedLM,
TapasForQuestionAnswering,
TapasForSequenceClassification,
TapasModel,
TapasTokenizer,
load_tf_weights_in_tapas,
)
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(
    task, reset_position_index_per_cell, tf_checkpoint_path, tapas_config_file, pytorch_dump_path
):
    config = TapasConfig.from_json_file(tapas_config_file)
    # set absolute/relative position embeddings parameter
    config.reset_position_index_per_cell = reset_position_index_per_cell

    # set remaining parameters of TapasConfig as well as the model based on the task
    if task == "SQA":
        model = TapasForQuestionAnswering(config=config)
    elif task == "WTQ":
        # run_task_main.py hparams
        config.num_aggregation_labels = 4
        config.use_answer_as_supervision = True
        # hparam_utils.py hparams
        config.answer_loss_cutoff = 0.664694
        config.cell_selection_preference = 0.207951
        config.huber_loss_delta = 0.121194
        config.init_cell_selection_weights_to_zero = True
        config.select_one_column = True
        config.allow_empty_column_selection = False
        config.temperature = 0.0352513
        model = TapasForQuestionAnswering(config=config)
    elif task == "WIKISQL_SUPERVISED":
        # run_task_main.py hparams
        config.num_aggregation_labels = 4
        config.use_answer_as_supervision = False
        # hparam_utils.py hparams
        config.answer_loss_cutoff = 36.4519
        config.cell_selection_preference = 0.903421
        config.huber_loss_delta = 222.088
        config.init_cell_selection_weights_to_zero = True
        config.select_one_column = True
        config.allow_empty_column_selection = True
        config.temperature = 0.763141
        model = TapasForQuestionAnswering(config=config)
    elif task == "TABFACT":
        model = TapasForSequenceClassification(config=config)
    elif task == "MLM":
        model = TapasForMaskedLM(config=config)
    elif task == "INTERMEDIATE_PRETRAINING":
        model = TapasModel(config=config)
    else:
        raise ValueError(f"Task {task} not supported.")

    print(f"Building PyTorch model from configuration: {config}")
    # Load weights from tf checkpoint
    load_tf_weights_in_tapas(model, config, tf_checkpoint_path)

    # Save pytorch-model (weights and configuration)
    print(f"Save PyTorch model to {pytorch_dump_path}")
    model.save_pretrained(pytorch_dump_path)

    # Save tokenizer files
    print(f"Save tokenizer files to {pytorch_dump_path}")
    tokenizer = TapasTokenizer(vocab_file=tf_checkpoint_path[:-10] + "vocab.txt", model_max_length=512)
    tokenizer.save_pretrained(pytorch_dump_path)

    print("Used relative position embeddings:", model.config.reset_position_index_per_cell)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--task''', default='''SQA''', type=str, help='''Model task for which to convert a checkpoint. Defaults to SQA.'''
)
parser.add_argument(
'''--reset_position_index_per_cell''',
default=False,
action='''store_true''',
help='''Whether to use relative position embeddings or not. Defaults to True.''',
)
parser.add_argument(
'''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--tapas_config_file''',
default=None,
type=str,
required=True,
help=(
'''The config json file corresponding to the pre-trained TAPAS model. \n'''
'''This specifies the model architecture.'''
),
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
    args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(
args.task,
args.reset_position_index_per_cell,
args.tf_checkpoint_path,
args.tapas_config_file,
args.pytorch_dump_path,
)
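# Example invocation (added for illustration; all paths are placeholders, and
# `--tf_checkpoint_path` must end in "model.ckpt" so the vocab file can be
# derived from it):
#   python convert_tapas_original_tf_checkpoint_to_pytorch.py \
#       --task WTQ \
#       --reset_position_index_per_cell \
#       --tf_checkpoint_path /path/to/model.ckpt \
#       --tapas_config_file /path/to/tapas_config.json \
#       --pytorch_dump_path /path/to/output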
| 22 | 0 |
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_squeezebert import SqueezeBertTokenizer
logger = logging.get_logger(__name__)
UpperCAmelCase_ : str = {"""vocab_file""": """vocab.txt""", """tokenizer_file""": """tokenizer.json"""}
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""squeezebert/squeezebert-uncased""": (
"""https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/vocab.txt"""
),
"""squeezebert/squeezebert-mnli""": """https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/vocab.txt""",
"""squeezebert/squeezebert-mnli-headless""": (
"""https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""squeezebert/squeezebert-uncased""": (
"""https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/tokenizer.json"""
),
"""squeezebert/squeezebert-mnli""": (
"""https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/tokenizer.json"""
),
"""squeezebert/squeezebert-mnli-headless""": (
"""https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/tokenizer.json"""
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""squeezebert/squeezebert-uncased""": 512,
"""squeezebert/squeezebert-mnli""": 512,
"""squeezebert/squeezebert-mnli-headless""": 512,
}
PRETRAINED_INIT_CONFIGURATION = {
"""squeezebert/squeezebert-uncased""": {"""do_lower_case""": True},
"""squeezebert/squeezebert-mnli""": {"""do_lower_case""": True},
"""squeezebert/squeezebert-mnli-headless""": {"""do_lower_case""": True},
}
class SqueezeBertTokenizerFast(PreTrainedTokenizerFast):
    r"""Construct a "fast" SqueezeBERT tokenizer (backed by HuggingFace's *tokenizers* library)."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = SqueezeBertTokenizer
    def __init__(
        self, vocab_file=None, tokenizer_file=None, do_lower_case=True, unk_token="[UNK]", sep_token="[SEP]",
        pad_token="[PAD]", cls_token="[CLS]", mask_token="[MASK]", tokenize_chinese_chars=True,
        strip_accents=None, **kwargs,
    ):
        super().__init__(
            vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, unk_token=unk_token,
            sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents, **kwargs,
        )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]

        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]

        return output

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory, filename_prefix=None):
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
| 91 |
'''simple docstring'''
import os
from datetime import datetime as dt
from github import Github
LABELS_TO_EXEMPT = [
'''good first issue''',
'''feature request''',
'''wip''',
]
def main():
    g = Github(os.environ["GITHUB_TOKEN"])
    repo = g.get_repo("huggingface/accelerate")
    open_issues = repo.get_issues(state="open")

    for issue in open_issues:
        comments = sorted([comment for comment in issue.get_comments()], key=lambda i: i.created_at, reverse=True)
        last_comment = comments[0] if len(comments) > 0 else None
        current_time = dt.utcnow()
        days_since_updated = (current_time - issue.updated_at).days
        days_since_creation = (current_time - issue.created_at).days
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and days_since_updated > 7
and days_since_creation >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Close issue since it has been 7 days of inactivity since bot mention.
issue.edit(state="closed" )
elif (
days_since_updated > 23
and days_since_creation >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Add stale comment
issue.create_comment(
"This issue has been automatically marked as stale because it has not had "
"recent activity. If you think this still needs to be addressed "
"please comment on this thread.\n\nPlease note that issues that do not follow the "
"[contributing guidelines](https://github.com/huggingface/accelerate/blob/main/CONTRIBUTING.md) "
"are likely to be ignored." )
if __name__ == "__main__":
main()
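# Deployment note (added for illustration): this script is designed to run on a
# daily schedule, e.g. from a GitHub Actions workflow with a cron trigger and
# GITHUB_TOKEN exposed in the environment.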
| 22 | 0 |
import warnings
from typing import List
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import is_flax_available, is_tf_available, is_torch_available
class OwlViTProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "OwlViTImageProcessor"
    tokenizer_class = ("CLIPTokenizer", "CLIPTokenizerFast")
    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)
    def __call__(self, text=None, images=None, query_images=None, padding="max_length", return_tensors="np", **kwargs):
        if text is None and query_images is None and images is None:
            raise ValueError(
                "You have to specify at least one text or query image or image. All three cannot be none."
            )

        if text is not None:
            if isinstance(text, str) or (isinstance(text, List) and not isinstance(text[0], List)):
                encodings = [self.tokenizer(text, padding=padding, return_tensors=return_tensors, **kwargs)]
            elif isinstance(text, List) and isinstance(text[0], List):
                encodings = []

                # Maximum number of queries across batch
                max_num_queries = max([len(t) for t in text])

                # Pad all batch samples to max number of text queries
                for t in text:
                    if len(t) != max_num_queries:
                        t = t + [" "] * (max_num_queries - len(t))

                    encoding = self.tokenizer(t, padding=padding, return_tensors=return_tensors, **kwargs)
                    encodings.append(encoding)
            else:
                raise TypeError("Input text should be a string, a list of strings or a nested list of strings")

            if return_tensors == "np":
                input_ids = np.concatenate([encoding["input_ids"] for encoding in encodings], axis=0)
                attention_mask = np.concatenate([encoding["attention_mask"] for encoding in encodings], axis=0)
            elif return_tensors == "jax" and is_flax_available():
                import jax.numpy as jnp

                input_ids = jnp.concatenate([encoding["input_ids"] for encoding in encodings], axis=0)
                attention_mask = jnp.concatenate([encoding["attention_mask"] for encoding in encodings], axis=0)
            elif return_tensors == "pt" and is_torch_available():
                import torch

                input_ids = torch.cat([encoding["input_ids"] for encoding in encodings], dim=0)
                attention_mask = torch.cat([encoding["attention_mask"] for encoding in encodings], dim=0)
            elif return_tensors == "tf" and is_tf_available():
                import tensorflow as tf

                input_ids = tf.stack([encoding["input_ids"] for encoding in encodings], axis=0)
                attention_mask = tf.stack([encoding["attention_mask"] for encoding in encodings], axis=0)
            else:
                raise ValueError("Target return tensor type could not be returned")

            encoding = BatchEncoding()
            encoding["input_ids"] = input_ids
            encoding["attention_mask"] = attention_mask

        if query_images is not None:
            encoding = BatchEncoding()
            query_pixel_values = self.image_processor(
                query_images, return_tensors=return_tensors, **kwargs
            ).pixel_values
            encoding["query_pixel_values"] = query_pixel_values

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif query_images is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None or query_images is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)
    def post_process(self, *args, **kwargs):
        """Forwards all arguments to `OwlViTImageProcessor.post_process`."""
        return self.image_processor.post_process(*args, **kwargs)

    def post_process_object_detection(self, *args, **kwargs):
        """Forwards all arguments to `OwlViTImageProcessor.post_process_object_detection`."""
        return self.image_processor.post_process_object_detection(*args, **kwargs)

    def post_process_image_guided_detection(self, *args, **kwargs):
        """Forwards all arguments to `OwlViTImageProcessor.post_process_image_guided_detection`."""
        return self.image_processor.post_process_image_guided_detection(*args, **kwargs)

    def batch_decode(self, *args, **kwargs):
        """Forwards all arguments to the tokenizer's `batch_decode`."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forwards all arguments to the tokenizer's `decode`."""
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
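# Usage sketch (added for illustration; downloads a checkpoint from the Hub):
#     processor = OwlViTProcessor.from_pretrained("google/owlvit-base-patch32")
#     inputs = processor(text=[["a photo of a cat", "a photo of a dog"]],
#                        images=image, return_tensors="pt")
#     # -> dict with input_ids, attention_mask and pixel_values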
| 92 |
'''simple docstring'''
import os
import pytest
import yaml
from datasets.features.features import Features, Value
from datasets.info import DatasetInfo, DatasetInfosDict
@pytest.mark.parametrize(
"files" , [
["full:README.md", "dataset_infos.json"],
["empty:README.md", "dataset_infos.json"],
["dataset_infos.json"],
["full:README.md"],
] , )
def test_from_dir(files, tmp_path_factory):
    dataset_infos_dir = tmp_path_factory.mktemp("dset_infos_dir")
    if "full:README.md" in files:
        with open(dataset_infos_dir / "README.md", "w") as f:
            f.write("---\ndataset_info:\n  dataset_size: 42\n---")
    if "empty:README.md" in files:
        with open(dataset_infos_dir / "README.md", "w") as f:
            f.write("")
    # we want to support dataset_infos.json for backward compatibility
    if "dataset_infos.json" in files:
        with open(dataset_infos_dir / "dataset_infos.json", "w") as f:
            f.write('{"default": {"dataset_size": 42}}')
    dataset_infos = DatasetInfosDict.from_directory(dataset_infos_dir)
    assert dataset_infos
    assert dataset_infos["default"].dataset_size == 42
@pytest.mark.parametrize(
"dataset_info" , [
DatasetInfo(),
DatasetInfo(
description="foo" , features=Features({"a": Value("int32" )} ) , builder_name="builder" , config_name="config" , version="1.0.0" , splits=[{"name": "train"}] , download_size=42 , ),
] , )
def test_dataset_info_dump_and_reload(tmp_path, dataset_info):
    tmp_path = str(tmp_path)
    dataset_info.write_to_directory(tmp_path)
    reloaded = DatasetInfo.from_directory(tmp_path)
    assert dataset_info == reloaded
    assert os.path.exists(os.path.join(tmp_path, "dataset_info.json"))
def test_dataset_info_to_yaml_dict():
    dataset_info = DatasetInfo(
        description="foo", citation="bar", homepage="https://foo.bar", license="CC0",
        features=Features({"a": Value("int32")}), post_processed={}, supervised_keys=(), task_templates=[],
        builder_name="builder", config_name="config", version="1.0.0",
        splits=[{"name": "train", "num_examples": 42}], download_checksums={}, download_size=1337,
        post_processing_size=442, dataset_size=1234, size_in_bytes=1337 + 442 + 1234,
    )
    dataset_info_yaml_dict = dataset_info._to_yaml_dict()
    assert sorted(dataset_info_yaml_dict) == sorted(DatasetInfo._INCLUDED_INFO_IN_YAML)
    for key in DatasetInfo._INCLUDED_INFO_IN_YAML:
        assert key in dataset_info_yaml_dict
        assert isinstance(dataset_info_yaml_dict[key], (list, dict, int, str))
    dataset_info_yaml = yaml.safe_dump(dataset_info_yaml_dict)
    dataset_info_yaml_dict_reloaded = yaml.safe_load(dataset_info_yaml)
    assert dataset_info_yaml_dict == dataset_info_yaml_dict_reloaded
def test_dataset_info_to_yaml_dict_empty():
    dataset_info = DatasetInfo()
    dataset_info_yaml_dict = dataset_info._to_yaml_dict()
    assert dataset_info_yaml_dict == {}
@pytest.mark.parametrize(
"dataset_infos_dict" , [
DatasetInfosDict(),
DatasetInfosDict({"default": DatasetInfo()} ),
DatasetInfosDict({"my_config_name": DatasetInfo()} ),
DatasetInfosDict(
{
"default": DatasetInfo(
description="foo" , features=Features({"a": Value("int32" )} ) , builder_name="builder" , config_name="config" , version="1.0.0" , splits=[{"name": "train"}] , download_size=42 , )
} ),
DatasetInfosDict(
{
"v1": DatasetInfo(dataset_size=42 ),
"v2": DatasetInfo(dataset_size=1337 ),
} ),
] , )
def test_dataset_infos_dict_dump_and_reload(tmp_path, dataset_infos_dict):
    tmp_path = str(tmp_path)
    dataset_infos_dict.write_to_directory(tmp_path)
    reloaded = DatasetInfosDict.from_directory(tmp_path)

    # the config_name of the dataset_infos_dict take over the attribute
    for config_name, dataset_info in dataset_infos_dict.items():
        dataset_info.config_name = config_name
        # the yaml representation doesn't include fields like description or citation
        # so we just test that we can recover what we can from the yaml
        dataset_infos_dict[config_name] = DatasetInfo._from_yaml_dict(dataset_info._to_yaml_dict())
    assert dataset_infos_dict == reloaded

    if dataset_infos_dict:
        assert os.path.exists(os.path.join(tmp_path, "README.md"))
| 22 | 0 |
'''simple docstring'''
def is_automorphic_number(number: int) -> bool:
    """Checks whether `number` is automorphic, i.e. its square ends in the number itself."""
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)
    if number < 0:
        return False
    number_square = number * number
    while number > 0:
        if number % 10 != number_square % 10:
            return False
        number //= 10
        number_square //= 10
    return True
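# Examples (added for illustration): 5 * 5 = 25 and 76 * 76 = 5776 end in 5 and
# 76 respectively, so both are automorphic; 7 * 7 = 49 does not end in 7.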
if __name__ == "__main__":
import doctest
doctest.testmod()
| 93 |
'''simple docstring'''
def reverse_long_words(sentence: str) -> str:
    """Reverses every word in the sentence that is longer than four characters."""
    return " ".join(
        "".join(word[::-1]) if len(word) > 4 else word for word in sentence.split()
    )
if __name__ == "__main__":
import doctest
doctest.testmod()
print(reverse_long_words('''Hey wollef sroirraw'''))
| 22 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'''configuration_deberta''': ['''DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''DebertaConfig''', '''DebertaOnnxConfig'''],
'''tokenization_deberta''': ['''DebertaTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_deberta_fast"] = ["DebertaTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_deberta"] = [
'''DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''DebertaForMaskedLM''',
'''DebertaForQuestionAnswering''',
'''DebertaForSequenceClassification''',
'''DebertaForTokenClassification''',
'''DebertaModel''',
'''DebertaPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_deberta"] = [
'''TF_DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFDebertaForMaskedLM''',
'''TFDebertaForQuestionAnswering''',
'''TFDebertaForSequenceClassification''',
'''TFDebertaForTokenClassification''',
'''TFDebertaModel''',
'''TFDebertaPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_deberta import DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, DebertaConfig, DebertaOnnxConfig
from .tokenization_deberta import DebertaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_deberta_fast import DebertaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_deberta import (
DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
DebertaForMaskedLM,
DebertaForQuestionAnswering,
DebertaForSequenceClassification,
DebertaForTokenClassification,
DebertaModel,
DebertaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_deberta import (
TF_DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDebertaForMaskedLM,
TFDebertaForQuestionAnswering,
TFDebertaForSequenceClassification,
TFDebertaForTokenClassification,
TFDebertaModel,
TFDebertaPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 94 |
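The `_LazyModule` registered above defers the heavy torch/TF imports until an exported name is first accessed. A simplified sketch of that mechanism (illustrative only; the real transformers implementation also caches submodules and honours `module_spec`):

import importlib
import types


class LazyModule(types.ModuleType):
    """Resolve exported names to their submodules on first attribute access."""

    def __init__(self, name, import_structure):
        super().__init__(name)
        # map each exported attribute to the submodule that defines it
        self._attr_to_module = {
            attr: mod for mod, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr):
        if attr not in self._attr_to_module:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        submodule = importlib.import_module("." + self._attr_to_module[attr], self.__name__)
        return getattr(submodule, attr)  # the heavy import happens only here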
'''simple docstring'''
def harmonic_series( n_term: str ) -> list:
    '''simple docstring'''
    if n_term == "":
        return []
    series = []
    for temp in range(int(n_term ) ):
        series.append(f'1/{temp + 1}' if series else "1" )
    return series
if __name__ == "__main__":
    nth_term = input('''Enter the last number (nth term) of the Harmonic Series''')
print('''Formula of Harmonic Series => 1+1/2+1/3 ..... 1/n''')
print(harmonic_series(nth_term))
| 22 | 0 |
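Calling the generator with n = 4 yields the familiar prefix of the series, and summing the corresponding fractions gives the fourth harmonic number H_4 = 25/12:

print(harmonic_series('4'))  # ['1', '1/2', '1/3', '1/4']

from fractions import Fraction
print(sum(Fraction(1, k) for k in range(1, 5)))  # 25/12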
def kth_permutation( k: int , n: int ):
    """simple docstring"""
    factorials = [1]
    for i in range(2 , n ):
        factorials.append(factorials[-1] * i )
    assert 0 <= k < factorials[-1] * n, "k out of bounds"
    permutation = []
    elements = list(range(n ) )
    # Find permutation
    while factorials:
        factorial = factorials.pop()
        number , k = divmod(k , factorial )
        permutation.append(elements[number] )
        elements.remove(elements[number] )
    permutation.append(elements[0] )
    return permutation
if __name__ == "__main__":
import doctest
doctest.testmod()
| 95 |
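Worked example for n = 4 and k = 4: the factorial bases are [1, 2, 6], and repeatedly dividing k by the largest remaining base picks indices 0, 2, 0 from the shrinking element pool:

print(kth_permutation(4, 4))  # [0, 3, 1, 2], the permutation at 0-indexed position 4 of range(4)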
'''simple docstring'''
import unittest
from transformers import PegasusTokenizer, PegasusTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir('''fixtures/test_sentencepiece_no_bos.model''')
@require_sentencepiece
@require_tokenizers
class PegasusTokenizationTest( TokenizerTesterMixin , unittest.TestCase ):
    tokenizer_class = PegasusTokenizer
    rust_tokenizer_class = PegasusTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp( self ):
        super().setUp()
        # We have a SentencePiece fixture for testing
        tokenizer = PegasusTokenizer(SAMPLE_VOCAB )
        tokenizer.save_pretrained(self.tmpdirname )
@cached_property
    def _large_tokenizer( self ):
        return PegasusTokenizer.from_pretrained("google/pegasus-large" )

    def get_tokenizer( self , **kwargs ):
        return PegasusTokenizer.from_pretrained(self.tmpdirname , **kwargs )

    def get_input_output_texts( self , tokenizer ):
        return ("This is a test", "This is a test")
    def test_convert_token_and_id( self ):
        token = "</s>"
        token_id = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token ) , token_id )
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id ) , token )

    def test_get_vocab( self ):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys() )
        self.assertEqual(vocab_keys[0] , "<pad>" )
        self.assertEqual(vocab_keys[1] , "</s>" )
        self.assertEqual(vocab_keys[-1] , "v" )
        self.assertEqual(len(vocab_keys ) , 1103 )
    def test_vocab_size( self ):
        self.assertEqual(self.get_tokenizer().vocab_size , 1103 )
    def test_mask_tokens_rust_pegasus( self ):
        rust_tokenizer = self.rust_tokenizer_class.from_pretrained(self.tmpdirname )
        py_tokenizer = self.tokenizer_class.from_pretrained(self.tmpdirname )
        raw_input_str = (
            "Let's see which <unk> is the better <unk_token_11> one <mask_1> It seems like this <mask_2> was important"
            " </s> <pad> <pad> <pad>"
        )
        rust_ids = rust_tokenizer([raw_input_str] , return_tensors=None , add_special_tokens=False ).input_ids[0]
        py_ids = py_tokenizer([raw_input_str] , return_tensors=None , add_special_tokens=False ).input_ids[0]
        self.assertListEqual(py_ids , rust_ids )
    def test_large_mask_tokens( self ):
        tokenizer = self._large_tokenizer
        # <mask_1> masks whole sentence while <mask_2> masks single word
        raw_input_str = "<mask_1> To ensure a <mask_2> flow of bank resolutions."
        desired_result = [2, 413, 615, 114, 3, 1971, 113, 1679, 10710, 107, 1]
        ids = tokenizer([raw_input_str] , return_tensors=None ).input_ids[0]
        self.assertListEqual(desired_result , ids )
    def test_large_tokenizer_settings( self ):
        tokenizer = self._large_tokenizer
        # The tracebacks for the following asserts are **better** without messages or self.assertEqual
        assert tokenizer.vocab_size == 96103
        assert tokenizer.pad_token_id == 0
        assert tokenizer.eos_token_id == 1
        assert tokenizer.offset == 103
        assert tokenizer.unk_token_id == tokenizer.offset + 2 == 105
        assert tokenizer.unk_token == "<unk>"
        assert tokenizer.model_max_length == 1024
        raw_input_str = "To ensure a smooth flow of bank resolutions."
        desired_result = [413, 615, 114, 2291, 1971, 113, 1679, 10710, 107, 1]
        ids = tokenizer([raw_input_str] , return_tensors=None ).input_ids[0]
        self.assertListEqual(desired_result , ids )
assert tokenizer.convert_ids_to_tokens([0, 1, 2, 3] ) == ["<pad>", "</s>", "<mask_1>", "<mask_2>"]
@require_torch
    def test_large_seq2seq_truncation( self ):
        src_texts = ["This is going to be way too long." * 150, "short example"]
        tgt_texts = ["not super long but more than 5 tokens", "tiny"]
        batch = self._large_tokenizer(src_texts , padding=True , truncation=True , return_tensors="pt" )
        targets = self._large_tokenizer(
            text_target=tgt_texts , max_length=5 , padding=True , truncation=True , return_tensors="pt" )
        assert batch.input_ids.shape == (2, 1024)
        assert batch.attention_mask.shape == (2, 1024)
        assert targets["input_ids"].shape == (2, 5)
        assert len(batch ) == 2  # input_ids, attention_mask.
@slow
    def test_tokenizer_integration( self ):
# fmt: off
_UpperCAmelCase = {"input_ids": [[3_8_9_7_9, 1_4_3, 1_8_4_8_5, 6_0_6, 1_3_0, 2_6_6_6_9, 8_7_6_8_6, 1_2_1, 5_4_1_8_9, 1_1_2_9, 1_1_1, 2_6_6_6_9, 8_7_6_8_6, 1_2_1, 9_1_1_4, 1_4_7_8_7, 1_2_1, 1_3_2_4_9, 1_5_8, 5_9_2, 9_5_6, 1_2_1, 1_4_6_2_1, 3_1_5_7_6, 1_4_3, 6_2_6_1_3, 1_0_8, 9_6_8_8, 9_3_0, 4_3_4_3_0, 1_1_5_6_2, 6_2_6_1_3, 3_0_4, 1_0_8, 1_1_4_4_3, 8_9_7, 1_0_8, 9_3_1_4, 1_7_4_1_5, 6_3_3_9_9, 1_0_8, 1_1_4_4_3, 7_6_1_4, 1_8_3_1_6, 1_1_8, 4_2_8_4, 7_1_4_8, 1_2_4_3_0, 1_4_3, 1_4_0_0, 2_5_7_0_3, 1_5_8, 1_1_1, 4_2_8_4, 7_1_4_8, 1_1_7_7_2, 1_4_3, 2_1_2_9_7, 1_0_6_4, 1_5_8, 1_2_2, 2_0_4, 3_5_0_6, 1_7_5_4, 1_1_3_3, 1_4_7_8_7, 1_5_8_1, 1_1_5, 3_3_2_2_4, 4_4_8_2, 1_1_1, 1_3_5_5, 1_1_0, 2_9_1_7_3, 3_1_7, 5_0_8_3_3, 1_0_8, 2_0_1_4_7, 9_4_6_6_5, 1_1_1, 7_7_1_9_8, 1_0_7, 1], [1_1_0, 6_2_6_1_3, 1_1_7, 6_3_8, 1_1_2, 1_1_3_3, 1_2_1, 2_0_0_9_8, 1_3_5_5, 7_9_0_5_0, 1_3_8_7_2, 1_3_5, 1_5_9_6, 5_3_5_4_1, 1_3_5_2, 1_4_1, 1_3_0_3_9, 5_5_4_2, 1_2_4, 3_0_2, 5_1_8, 1_1_1, 2_6_8, 2_9_5_6, 1_1_5, 1_4_9, 4_4_2_7, 1_0_7, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1_3_9, 1_2_3_5, 2_7_9_9, 1_8_2_8_9, 1_7_7_8_0, 2_0_4, 1_0_9, 9_4_7_4, 1_2_9_6, 1_0_7, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
            expected_encoding=_UpperCAmelCase , model_name="google/bigbird-pegasus-large-arxiv" , revision="ba85d0851d708441f91440d509690f1ab6353415" , )
@require_sentencepiece
@require_tokenizers
class BigBirdPegasusTokenizationTest( TokenizerTesterMixin , unittest.TestCase ):
    tokenizer_class = PegasusTokenizer
    rust_tokenizer_class = PegasusTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp( self ):
        super().setUp()
        # We have a SentencePiece fixture for testing
        tokenizer = PegasusTokenizer(SAMPLE_VOCAB , offset=0 , mask_token_sent=None , mask_token="[MASK]" )
        tokenizer.save_pretrained(self.tmpdirname )
@cached_property
    def _large_tokenizer( self ):
        return PegasusTokenizer.from_pretrained("google/bigbird-pegasus-large-arxiv" )

    def get_tokenizer( self , **kwargs ):
        return PegasusTokenizer.from_pretrained(self.tmpdirname , **kwargs )

    def get_input_output_texts( self , tokenizer ):
        return ("This is a test", "This is a test")
    def test_mask_tokens_rust_pegasus( self ):
        rust_tokenizer = self.rust_tokenizer_class.from_pretrained(self.tmpdirname )
        py_tokenizer = self.tokenizer_class.from_pretrained(self.tmpdirname )
        raw_input_str = (
            "Let's see which <unk> is the better <unk_token> one [MASK] It seems like this [MASK] was important </s>"
            " <pad> <pad> <pad>"
        )
        rust_ids = rust_tokenizer([raw_input_str] , return_tensors=None , add_special_tokens=False ).input_ids[0]
        py_ids = py_tokenizer([raw_input_str] , return_tensors=None , add_special_tokens=False ).input_ids[0]
        self.assertListEqual(py_ids , rust_ids )
@require_torch
    def test_large_seq2seq_truncation( self ):
        src_texts = ["This is going to be way too long." * 1000, "short example"]
        tgt_texts = ["not super long but more than 5 tokens", "tiny"]
        batch = self._large_tokenizer(src_texts , padding=True , truncation=True , return_tensors="pt" )
        targets = self._large_tokenizer(
            text_target=tgt_texts , max_length=5 , padding=True , truncation=True , return_tensors="pt" )
        assert batch.input_ids.shape == (2, 4096)
        assert batch.attention_mask.shape == (2, 4096)
        assert targets["input_ids"].shape == (2, 5)
        assert len(batch ) == 2  # input_ids, attention_mask.
    def test_equivalence_to_orig_tokenizer( self ):
        tgt_text = (
            "This is an example string that is used to test the original TF implementation against the HF"
            " implementation"
        )
        token_ids = self._large_tokenizer(tgt_text ).input_ids
        self.assertListEqual(
            token_ids , [182, 117, 142, 587, 4211, 120, 117, 263, 112, 804, 109, 856, 25016, 3137, 464, 109, 26955, 3137, 1] , )
| 22 | 0 |
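The large-tokenizer assertions above pin down Pegasus's id layout: ids 0-2 are reserved for <pad>, </s> and the mask tokens, and the underlying SentencePiece ids are shifted up by `offset` (103). A hedged usage sketch (downloads the checkpoint on first call):

from transformers import PegasusTokenizer

tokenizer = PegasusTokenizer.from_pretrained('google/pegasus-large')
ids = tokenizer('To ensure a smooth flow of bank resolutions.').input_ids
print(ids[-1])           # 1, the </s> id asserted in the tests above
print(tokenizer.offset)  # 103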
"""simple docstring"""
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""BAAI/AltCLIP""": """https://huggingface.co/BAAI/AltCLIP/resolve/main/config.json""",
# See all AltCLIP models at https://huggingface.co/models?filter=altclip
}
class AltCLIPTextConfig( PretrainedConfig ):
    '''simple docstring'''
    model_type = """altclip_text_model"""

    def __init__( self , vocab_size=250002 , hidden_size=1024 , num_hidden_layers=24 , num_attention_heads=16 , intermediate_size=4096 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=514 , type_vocab_size=1 , initializer_range=0.02 , initializer_factor=0.02 , layer_norm_eps=1e-05 , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , position_embedding_type="absolute" , use_cache=True , project_dim=768 , **kwargs , ):
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.project_dim = project_dim
class AltCLIPVisionConfig( PretrainedConfig ):
    '''simple docstring'''
    model_type = """altclip_vision_model"""

    def __init__( self , hidden_size=768 , intermediate_size=3072 , projection_dim=512 , num_hidden_layers=12 , num_attention_heads=12 , num_channels=3 , image_size=224 , patch_size=32 , hidden_act="quick_gelu" , layer_norm_eps=1e-5 , attention_dropout=0.0 , initializer_range=0.02 , initializer_factor=1.0 , **kwargs , ):
        super().__init__(**kwargs )
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.projection_dim = projection_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act
    @classmethod
    def from_pretrained( cls , pretrained_model_name_or_path , **kwargs ):
        cls._set_token_in_kwargs(kwargs )
        config_dict , kwargs = cls.get_config_dict(pretrained_model_name_or_path , **kwargs )
        # get the vision config dict if we are loading from AltCLIPConfig
        if config_dict.get('model_type' ) == "altclip":
            config_dict = config_dict['vision_config']
        if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type:
            logger.warning(
                F'''You are using a model of type {config_dict['model_type']} to instantiate a model of type '''
                F'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
        return cls.from_dict(config_dict , **kwargs )
class AltCLIPConfig( PretrainedConfig ):
    '''simple docstring'''
    model_type = """altclip"""
    is_composition = True

    def __init__( self , text_config=None , vision_config=None , projection_dim=768 , logit_scale_init_value=2.6592 , **kwargs ):
        # If `_config_dict` exist, we use them for the backward compatibility.
        # We pop out these 2 attributes before calling `super().__init__` to avoid them being saved (which causes a lot
        # of confusion!).
        text_config_dict = kwargs.pop('text_config_dict' , None )
        vision_config_dict = kwargs.pop('vision_config_dict' , None )
        super().__init__(**kwargs )
# Instead of simply assigning `[text|vision]_config_dict` to `[text|vision]_config`, we use the values in
# `[text|vision]_config_dict` to update the values in `[text|vision]_config`. The values should be same in most
# cases, but we don't want to break anything regarding `_config_dict` that existed before commit `8827e1b2`.
        if text_config_dict is not None:
            if text_config is None:
                text_config = {}
            # This is the complete result when using `text_config_dict`.
            _text_config_dict = AltCLIPTextConfig(**text_config_dict ).to_dict()
            # Give a warning if the values exist in both `_text_config_dict` and `text_config` but being different.
            for key, value in _text_config_dict.items():
                if key in text_config and value != text_config[key] and key not in ["transformers_version"]:
                    # If specified in `text_config_dict`
                    if key in text_config_dict:
                        message = (
                            F'''`{key}` is found in both `text_config_dict` and `text_config` but with different values. '''
                            F'''The value `text_config_dict["{key}"]` will be used instead.'''
                        )
                    # If inferred from default argument values (just to be super careful)
                    else:
                        message = (
                            F'''`text_config_dict` is provided which will be used to initialize `AltCLIPTextConfig`. The '''
                            F'''value `text_config["{key}"]` will be overriden.'''
                        )
                    logger.warning(message )
            # Update all values in `text_config` with the ones in `_text_config_dict`.
            text_config.update(_text_config_dict )
        if vision_config_dict is not None:
            if vision_config is None:
                vision_config = {}
            # This is the complete result when using `vision_config_dict`.
            _vision_config_dict = AltCLIPVisionConfig(**vision_config_dict ).to_dict()
            # convert keys to string instead of integer
            if "id2label" in _vision_config_dict:
                _vision_config_dict["id2label"] = {
                    str(key ): value for key, value in _vision_config_dict['id2label'].items()
                }
            # Give a warning if the values exist in both `_vision_config_dict` and `vision_config` but being different.
            for key, value in _vision_config_dict.items():
                if key in vision_config and value != vision_config[key] and key not in ["transformers_version"]:
                    # If specified in `vision_config_dict`
                    if key in vision_config_dict:
                        message = (
                            F'''`{key}` is found in both `vision_config_dict` and `vision_config` but with different '''
                            F'''values. The value `vision_config_dict["{key}"]` will be used instead.'''
                        )
                    # If inferred from default argument values (just to be super careful)
                    else:
                        message = (
                            F'''`vision_config_dict` is provided which will be used to initialize `AltCLIPVisionConfig`. '''
                            F'''The value `vision_config["{key}"]` will be overriden.'''
                        )
                    logger.warning(message )
            # Update all values in `vision_config` with the ones in `_vision_config_dict`.
            vision_config.update(_vision_config_dict )
        if text_config is None:
            text_config = {}
            logger.info('`text_config` is `None`. Initializing the `AltCLIPTextConfig` with default values.' )
        if vision_config is None:
            vision_config = {}
            logger.info('`vision_config` is `None`. initializing the `AltCLIPVisionConfig` with default values.' )
        self.text_config = AltCLIPTextConfig(**text_config )
        self.vision_config = AltCLIPVisionConfig(**vision_config )
        self.projection_dim = projection_dim
        self.logit_scale_init_value = logit_scale_init_value
        self.initializer_factor = 1.0
    @classmethod
    def from_text_vision_configs( cls , text_config , vision_config , **kwargs ):
        return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **kwargs )

    def to_dict( self ):
        output = copy.deepcopy(self.__dict__ )
        output["text_config"] = self.text_config.to_dict()
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
return output | 96 |
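Composing the three classes follows the CLIP convention: build the text and vision configs separately, then combine them. A minimal sketch using the defaults defined above:

text_config = AltCLIPTextConfig()      # XLM-R-sized text tower, project_dim=768
vision_config = AltCLIPVisionConfig()  # ViT tower, projection_dim=512
config = AltCLIPConfig.from_text_vision_configs(text_config, vision_config)
print(config.projection_dim)           # 768
print(config.to_dict()['model_type'])  # altclip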
'''simple docstring'''
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, BertTokenizer, BlipImageProcessor, BlipProcessor, PreTrainedTokenizerFast
@require_vision
class BlipProcessorTest( unittest.TestCase ):
    def setUp( self ):
        self.tmpdirname = tempfile.mkdtemp()
        image_processor = BlipImageProcessor()
        tokenizer = BertTokenizer.from_pretrained("hf-internal-testing/tiny-random-BertModel" )
        processor = BlipProcessor(image_processor , tokenizer )
        processor.save_pretrained(self.tmpdirname )

    def get_tokenizer( self , **kwargs ):
        return AutoProcessor.from_pretrained(self.tmpdirname , **kwargs ).tokenizer

    def get_image_processor( self , **kwargs ):
        return AutoProcessor.from_pretrained(self.tmpdirname , **kwargs ).image_processor

    def tearDown( self ):
        shutil.rmtree(self.tmpdirname )

    def prepare_image_inputs( self ):
        image_inputs = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uint8 )]
        image_inputs = [Image.fromarray(np.moveaxis(x , 0 , -1 ) ) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_additional_features( self ):
        processor = BlipProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
        processor.save_pretrained(self.tmpdirname )
        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)" , eos_token="(EOS)" )
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False , padding_value=1.0 )
        processor = BlipProcessor.from_pretrained(
            self.tmpdirname , bos_token="(BOS)" , eos_token="(EOS)" , do_normalize=False , padding_value=1.0 )
        self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
        self.assertIsInstance(processor.tokenizer , PreTrainedTokenizerFast )
        self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
        self.assertIsInstance(processor.image_processor , BlipImageProcessor )

    def test_image_processor( self ):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = BlipProcessor(tokenizer=tokenizer , image_processor=image_processor )
        image_input = self.prepare_image_inputs()
        input_feat_extract = image_processor(image_input , return_tensors="np" )
        input_processor = processor(images=image_input , return_tensors="np" )
        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )

    def test_tokenizer( self ):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = BlipProcessor(tokenizer=tokenizer , image_processor=image_processor )
        input_str = "lower newer"
        encoded_processor = processor(text=input_str )
        encoded_tok = tokenizer(input_str , return_token_type_ids=False )
        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key] , encoded_processor[key] )

    def test_processor( self ):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = BlipProcessor(tokenizer=tokenizer , image_processor=image_processor )
        input_str = "lower newer"
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str , images=image_input )
        self.assertListEqual(list(inputs.keys() ) , ["pixel_values", "input_ids", "attention_mask"] )
        # test if it raises when no input is passed
        with pytest.raises(ValueError ):
            processor()

    def test_tokenizer_decode( self ):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = BlipProcessor(tokenizer=tokenizer , image_processor=image_processor )
        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
        decoded_processor = processor.batch_decode(predicted_ids )
        decoded_tok = tokenizer.batch_decode(predicted_ids )
        self.assertListEqual(decoded_tok , decoded_processor )

    def test_model_input_names( self ):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = BlipProcessor(tokenizer=tokenizer , image_processor=image_processor )
        input_str = "lower newer"
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str , images=image_input )
        # For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask']
        self.assertListEqual(list(inputs.keys() ) , ["pixel_values", "input_ids", "attention_mask"] )
| 22 | 0 |
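These tests all hinge on `save_pretrained` writing the tokenizer and image-processor files into a single directory that `AutoProcessor` can reload. A condensed sketch of that round trip (tiny checkpoint name taken from the tests above):

import tempfile

from transformers import AutoProcessor, BertTokenizer, BlipImageProcessor, BlipProcessor

tokenizer = BertTokenizer.from_pretrained('hf-internal-testing/tiny-random-BertModel')
processor = BlipProcessor(BlipImageProcessor(), tokenizer)
with tempfile.TemporaryDirectory() as tmpdir:
    processor.save_pretrained(tmpdir)  # writes tokenizer + image processor configs
    reloaded = AutoProcessor.from_pretrained(tmpdir)
print(type(reloaded).__name__)  # BlipProcessor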
'''simple docstring'''
from math import atan, cos, radians, sin, tan
from .haversine_distance import haversine_distance
AXIS_A = 6378137.0
AXIS_B = 6356752.314245
EQUATORIAL_RADIUS = 6378137


def lamberts_ellipsoidal_distance( lat1: float , lon1: float , lat2: float , lon2: float ) -> float:
    '''simple docstring'''
    # Flattening of the ellipsoid
    flattening = (AXIS_A - AXIS_B) / AXIS_A
    # Parametric latitudes
    # https://en.wikipedia.org/wiki/Latitude#Parametric_(or_reduced)_latitude
    b_lat1 = atan((1 - flattening) * tan(radians(lat1 ) ) )
    b_lat2 = atan((1 - flattening) * tan(radians(lat2 ) ) )
    # Compute central angle between two points
    # using haversine theta. sigma = haversine_distance / equatorial radius
    sigma = haversine_distance(lat1 , lon1 , lat2 , lon2 ) / EQUATORIAL_RADIUS
    # Intermediate P and Q values
    p_value = (b_lat1 + b_lat2) / 2
    q_value = (b_lat1 - b_lat2) / 2
    # Intermediate X value
    # X = (sigma - sin(sigma)) * sin^2Pcos^2Q / cos^2(sigma/2)
    x_numerator = (sin(p_value ) ** 2) * (cos(q_value ) ** 2)
    x_denominator = cos(sigma / 2 ) ** 2
    x_value = (sigma - sin(sigma )) * (x_numerator / x_denominator)
    # Intermediate Y value
    # Y = (sigma + sin(sigma)) * cos^2Psin^2Q / sin^2(sigma/2)
    y_numerator = (cos(p_value ) ** 2) * (sin(q_value ) ** 2)
    y_denominator = sin(sigma / 2 ) ** 2
    y_value = (sigma + sin(sigma )) * (y_numerator / y_denominator)
    return EQUATORIAL_RADIUS * (sigma - ((flattening / 2) * (x_value + y_value)))
if __name__ == "__main__":
import doctest
doctest.testmod() | 97 |
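Plugging in two points shows the flattening correction at work. The coordinates below are illustrative; Lambert's formula stays within roughly 10 meters over distances of thousands of kilometers:

SAN_FRANCISCO = (37.774856, -122.424227)
YOSEMITE = (37.864742, -119.537521)
print(f'{lamberts_ellipsoidal_distance(*SAN_FRANCISCO, *YOSEMITE):0,.0f} meters')  # roughly 254,000 meters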
'''simple docstring'''
import inspect
from typing import List, Optional, Tuple, Union
import numpy as np
import PIL
import torch
import torch.utils.checkpoint
from ...models import UNet2DModel, VQModel
from ...schedulers import (
DDIMScheduler,
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
)
from ...utils import PIL_INTERPOLATION, randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
def preprocess( image ):
    '''simple docstring'''
    w , h = image.size
    w , h = (x - x % 32 for x in (w, h))  # resize to integer multiple of 32
    image = image.resize((w, h) , resample=PIL_INTERPOLATION["lanczos"] )
    image = np.array(image ).astype(np.float32 ) / 255.0
    image = image[None].transpose(0 , 3 , 1 , 2 )
    image = torch.from_numpy(image )
    return 2.0 * image - 1.0
class LDMSuperResolutionPipeline( DiffusionPipeline ):
    def __init__( self , vqvae: VQModel , unet: UNet2DModel , scheduler: Union[
        DDIMScheduler,
        PNDMScheduler,
        LMSDiscreteScheduler,
        EulerDiscreteScheduler,
        EulerAncestralDiscreteScheduler,
        DPMSolverMultistepScheduler,
    ] , ):
        super().__init__()
        self.register_modules(vqvae=vqvae , unet=unet , scheduler=scheduler )

    @torch.no_grad()
    def __call__( self , image: Union[torch.Tensor, PIL.Image.Image] = None , batch_size: Optional[int] = 1 , num_inference_steps: Optional[int] = 100 , eta: Optional[float] = 0.0 , generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None , output_type: Optional[str] = "pil" , return_dict: bool = True , ):
        if isinstance(image , PIL.Image.Image ):
            batch_size = 1
        elif isinstance(image , torch.Tensor ):
            batch_size = image.shape[0]
        else:
            raise ValueError(f'`image` has to be of type `PIL.Image.Image` or `torch.Tensor` but is {type(image )}' )
        if isinstance(image , PIL.Image.Image ):
            image = preprocess(image )
        height , width = image.shape[-2:]
        # in_channels should be 6: 3 for latents, 3 for low resolution image
        latents_shape = (batch_size, self.unet.config.in_channels // 2, height, width)
        latents_dtype = next(self.unet.parameters() ).dtype
        latents = randn_tensor(latents_shape , generator=generator , device=self.device , dtype=latents_dtype )
        image = image.to(device=self.device , dtype=latents_dtype )
        # set timesteps and move to the correct device
        self.scheduler.set_timesteps(num_inference_steps , device=self.device )
        timesteps_tensor = self.scheduler.timesteps
        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma
        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature.
        # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
        # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
        # and should be between [0, 1]
        accepts_eta = "eta" in set(inspect.signature(self.scheduler.step ).parameters.keys() )
        extra_kwargs = {}
        if accepts_eta:
            extra_kwargs["eta"] = eta
        for t in self.progress_bar(timesteps_tensor ):
            # concat latents and low resolution image in the channel dimension.
            latents_input = torch.cat([latents, image] , dim=1 )
            latents_input = self.scheduler.scale_model_input(latents_input , t )
            # predict the noise residual
            noise_pred = self.unet(latents_input , t ).sample
            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred , t , latents , **extra_kwargs ).prev_sample
        # decode the image latents with the VQVAE
        image = self.vqvae.decode(latents ).sample
        image = torch.clamp(image , -1.0 , 1.0 )
        image = image / 2 + 0.5
        image = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image )
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=image )
| 22 | 0 |
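End to end, the pipeline takes a low-resolution image and returns a 4x upscale. A hedged usage sketch (checkpoint name assumed; swap in any LDM super-resolution checkpoint, and the file paths are placeholders):

import torch
from diffusers import LDMSuperResolutionPipeline
from PIL import Image

device = 'cuda' if torch.cuda.is_available() else 'cpu'
pipeline = LDMSuperResolutionPipeline.from_pretrained('CompVis/ldm-super-resolution-4x-openimages').to(device)

low_res = Image.open('low_res.png').convert('RGB').resize((128, 128))
upscaled = pipeline(low_res, num_inference_steps=100, eta=1.0).images[0]
upscaled.save('upscaled.png')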
"""simple docstring"""
def odd_even_sort( input_list: list ) -> list:
    is_sorted = False
    while is_sorted is False:  # Until all the indices are traversed keep looping
        is_sorted = True
        for i in range(0 , len(input_list ) - 1 , 2 ):  # iterating over all even indices
            if input_list[i] > input_list[i + 1]:
                # swapping if elements not in order
                input_list[i] , input_list[i + 1] = input_list[i + 1] , input_list[i]
                is_sorted = False
        for i in range(1 , len(input_list ) - 1 , 2 ):  # iterating over all odd indices
            if input_list[i] > input_list[i + 1]:
                # swapping if elements not in order
                input_list[i] , input_list[i + 1] = input_list[i + 1] , input_list[i]
                is_sorted = False
    return input_list


if __name__ == "__main__":
    print('Enter list to be sorted')
    input_list = [int(x) for x in input().split()]
    # inputing elements of the list in one line
    sorted_list = odd_even_sort(input_list)
print('The sorted list is')
print(sorted_list)
| 98 |
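Brick sort makes the same O(n^2) comparisons as bubble sort, but each pass touches disjoint (even- or odd-indexed) pairs, which is why the algorithm parallelizes well. A quick check:

print(odd_even_sort([5, 3, 1, 4, 2]))  # [1, 2, 3, 4, 5]
print(odd_even_sort([]))               # [] -- the first sweep finds nothing to swap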
'''simple docstring'''
import string
from math import log10


def term_frequency( term: str , document: str ) -> int:
    '''simple docstring'''
    document_without_punctuation = document.translate(
        str.maketrans("" , "" , string.punctuation ) ).replace("\n" , "" )
    tokenize_document = document_without_punctuation.split(" " )  # word tokenization
    return len([word for word in tokenize_document if word.lower() == term.lower()] )


def document_frequency( term: str , corpus: str ) -> tuple[int, int]:
    '''simple docstring'''
    corpus_without_punctuation = corpus.lower().translate(
        str.maketrans("" , "" , string.punctuation ) )  # strip all punctuation and replace it with ''
    docs = corpus_without_punctuation.split("\n" )
    term = term.lower()
    return (len([doc for doc in docs if term in doc] ), len(docs ))


def inverse_document_frequency( df: int , n: int , smoothing: bool = False ) -> float:
    '''simple docstring'''
    if smoothing:
        if n == 0:
            raise ValueError("log10(0) is undefined." )
        return round(1 + log10(n / (1 + df) ) , 3 )
    if df == 0:
        raise ZeroDivisionError("df must be > 0" )
    elif n == 0:
        raise ValueError("log10(0) is undefined." )
    return round(log10(n / df ) , 3 )


def tf_idf( tf: int , idf: int ) -> float:
    '''simple docstring'''
    return round(tf * idf , 3 )
| 22 | 0 |
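A three-document corpus makes the whole pipeline concrete: count the term in one document, count the documents containing it, then weight the two together:

corpus = 'This is the first document.\nThis one mentions pandas.\nA third document.'
tf = term_frequency('pandas', 'This one mentions pandas.')  # 1
df, n = document_frequency('pandas', corpus)                # (1, 3)
idf = inverse_document_frequency(df, n)                     # round(log10(3 / 1), 3) = 0.477
print(tf_idf(tf, idf))                                      # 0.477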
from argparse import ArgumentParser
from .env import EnvironmentCommand
def main() -> None:
    parser = ArgumentParser('Diffusers CLI tool' , usage='diffusers-cli <command> [<args>]' )
    commands_parser = parser.add_subparsers(help='diffusers-cli command helpers' )
    # Register commands
    EnvironmentCommand.register_subcommand(commands_parser )
    # Let's go
    args = parser.parse_args()
    if not hasattr(args , 'func' ):
        parser.print_help()
        exit(1 )
    # Run
    service = args.func(args )
    service.run()
if __name__ == "__main__":
main()
| 99 |
'''simple docstring'''
from ..utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_scipy_available,
is_torch_available,
is_torchsde_available,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_pt_objects import * # noqa F403
else:
from .scheduling_consistency_models import CMStochasticIterativeScheduler
from .scheduling_ddim import DDIMScheduler
from .scheduling_ddim_inverse import DDIMInverseScheduler
from .scheduling_ddim_parallel import DDIMParallelScheduler
from .scheduling_ddpm import DDPMScheduler
from .scheduling_ddpm_parallel import DDPMParallelScheduler
from .scheduling_deis_multistep import DEISMultistepScheduler
from .scheduling_dpmsolver_multistep import DPMSolverMultistepScheduler
from .scheduling_dpmsolver_multistep_inverse import DPMSolverMultistepInverseScheduler
from .scheduling_dpmsolver_singlestep import DPMSolverSinglestepScheduler
from .scheduling_euler_ancestral_discrete import EulerAncestralDiscreteScheduler
from .scheduling_euler_discrete import EulerDiscreteScheduler
from .scheduling_heun_discrete import HeunDiscreteScheduler
from .scheduling_ipndm import IPNDMScheduler
from .scheduling_k_dpm_2_ancestral_discrete import KDPMaAncestralDiscreteScheduler
from .scheduling_k_dpm_2_discrete import KDPMaDiscreteScheduler
from .scheduling_karras_ve import KarrasVeScheduler
from .scheduling_pndm import PNDMScheduler
from .scheduling_repaint import RePaintScheduler
from .scheduling_sde_ve import ScoreSdeVeScheduler
from .scheduling_sde_vp import ScoreSdeVpScheduler
from .scheduling_unclip import UnCLIPScheduler
from .scheduling_unipc_multistep import UniPCMultistepScheduler
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin
from .scheduling_vq_diffusion import VQDiffusionScheduler
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_flax_objects import * # noqa F403
else:
from .scheduling_ddim_flax import FlaxDDIMScheduler
from .scheduling_ddpm_flax import FlaxDDPMScheduler
from .scheduling_dpmsolver_multistep_flax import FlaxDPMSolverMultistepScheduler
from .scheduling_karras_ve_flax import FlaxKarrasVeScheduler
from .scheduling_lms_discrete_flax import FlaxLMSDiscreteScheduler
from .scheduling_pndm_flax import FlaxPNDMScheduler
from .scheduling_sde_ve_flax import FlaxScoreSdeVeScheduler
from .scheduling_utils_flax import (
FlaxKarrasDiffusionSchedulers,
FlaxSchedulerMixin,
FlaxSchedulerOutput,
broadcast_to_shape_from_left,
)
try:
if not (is_torch_available() and is_scipy_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_torch_and_scipy_objects import * # noqa F403
else:
from .scheduling_lms_discrete import LMSDiscreteScheduler
try:
if not (is_torch_available() and is_torchsde_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_torch_and_torchsde_objects import * # noqa F403
else:
from .scheduling_dpmsolver_sde import DPMSolverSDEScheduler
| 22 | 0 |
"""simple docstring"""
from __future__ import annotations
import pandas as pd
def calculate_waitingtime( arrival_time , burst_time , no_of_processes ):
    """simple docstring"""
    remaining_time = [0] * no_of_processes
    waiting_time = [0] * no_of_processes
    # Copy the burst time into remaining_time[]
    for i in range(no_of_processes ):
        remaining_time[i] = burst_time[i]
    complete = 0
    increment_time = 0
    minm = 999999999
    short = 0
    check = False
    # Process until all processes are completed
    while complete != no_of_processes:
        for j in range(no_of_processes ):
            if arrival_time[j] <= increment_time and remaining_time[j] > 0:
                if remaining_time[j] < minm:
                    minm = remaining_time[j]
                    short = j
                    check = True
        if not check:
            increment_time += 1
            continue
        remaining_time[short] -= 1
        minm = remaining_time[short]
        if minm == 0:
            minm = 999999999
        if remaining_time[short] == 0:
            complete += 1
            check = False
            # Find finish time of current process
            finish_time = increment_time + 1
            # Calculate waiting time
            finar = finish_time - arrival_time[short]
            waiting_time[short] = finar - burst_time[short]
            if waiting_time[short] < 0:
                waiting_time[short] = 0
        # Increment time
        increment_time += 1
    return waiting_time


def calculate_turnaroundtime( burst_time , no_of_processes , waiting_time ):
    """simple docstring"""
    turn_around_time = [0] * no_of_processes
    for i in range(no_of_processes ):
        turn_around_time[i] = burst_time[i] + waiting_time[i]
    return turn_around_time


def calculate_average_times( waiting_time , turn_around_time , no_of_processes ):
    """simple docstring"""
    total_waiting_time = 0
    total_turn_around_time = 0
    for i in range(no_of_processes ):
        total_waiting_time = total_waiting_time + waiting_time[i]
        total_turn_around_time = total_turn_around_time + turn_around_time[i]
    print(f"Average waiting time = {total_waiting_time / no_of_processes:.5f}" )
    print("Average turn around time =" , total_turn_around_time / no_of_processes )


if __name__ == "__main__":
    print("Enter how many process you want to analyze" )
    no_of_processes = int(input() )
    burst_time = [0] * no_of_processes
    arrival_time = [0] * no_of_processes
    processes = list(range(1 , no_of_processes + 1) )
    for i in range(no_of_processes ):
        print("Enter the arrival time and burst time for process:--" + str(i + 1) )
        arrival_time[i] , burst_time[i] = map(int , input().split() )
    waiting_time = calculate_waitingtime(arrival_time , burst_time , no_of_processes )
    bt = burst_time
    n = no_of_processes
    wt = waiting_time
    turn_around_time = calculate_turnaroundtime(bt , n , wt )
    calculate_average_times(waiting_time , turn_around_time , no_of_processes )
    fcfs = pd.DataFrame(
        list(zip(processes , burst_time , arrival_time , waiting_time , turn_around_time) ),
        columns=[
            "Process",
            "BurstTime",
            "ArrivalTime",
            "WaitingTime",
            "TurnAroundTime",
        ],
    )
    # Printing the dataFrame
    pd.set_option("display.max_rows" , fcfs.shape[0] + 1 )
    print(fcfs )
| 100 |
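With three processes the preemptive policy is easy to trace by hand: at every tick the scheduler runs whichever arrived process has the least burst time remaining, so the long first process is repeatedly preempted:

arrival_time = [0, 1, 2]
burst_time = [4, 2, 1]
waiting_time = calculate_waitingtime(arrival_time, burst_time, 3)
turn_around_time = calculate_turnaroundtime(burst_time, 3, waiting_time)
print(waiting_time)      # [3, 0, 1]
print(turn_around_time)  # [7, 2, 2]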
'''simple docstring'''
def count_set_bits( number: int ) -> int:
    '''simple docstring'''
    if not isinstance(number , int ) or number < 0:
        raise ValueError("Input must be a non-negative integer" )
    count = 0
    while number:
        # This way we arrive at next set bit (next 1) instead of looping
        # through each bit and checking for 1s hence the
        # loop won't run 32 times it will only run the number of `1` times
        number &= number - 1
        count += 1
    return count
if __name__ == "__main__":
import doctest
doctest.testmod()
| 22 | 0 |
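Brian Kernighan's trick is the line `number &= number - 1`, which clears the lowest set bit, so the loop iterates once per 1-bit rather than once per bit position:

print(count_set_bits(25))  # 3, since bin(25) == '0b11001'
print(count_set_bits(36))  # 2, since bin(36) == '0b100100'
print(count_set_bits(0))   # 0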