"""simple docstring"""
from dataclasses import asdict, dataclass
from typing import Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
# TODO Update this
ESM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"facebook/esm-1b": "https://huggingface.co/facebook/esm-1b/resolve/main/config.json",
# See all ESM models at https://huggingface.co/models?filter=esm
}


class EsmConfig(PretrainedConfig):
    model_type = "esm"

    def __init__(
        self,
        vocab_size=None,
        mask_token_id=None,
        pad_token_id=None,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=1026,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        position_embedding_type="absolute",
        use_cache=True,
        emb_layer_norm_before=None,
        token_dropout=False,
        is_folding_model=False,
        esmfold_config=None,
        vocab_list=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, mask_token_id=mask_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.emb_layer_norm_before = emb_layer_norm_before
        self.token_dropout = token_dropout
        self.is_folding_model = is_folding_model
        if is_folding_model:
            if esmfold_config is None:
                logger.info("No esmfold_config supplied for folding model, using default values.")
                esmfold_config = EsmFoldConfig()
            elif isinstance(esmfold_config, dict):
                esmfold_config = EsmFoldConfig(**esmfold_config)
            self.esmfold_config = esmfold_config
            if vocab_list is None:
                logger.warning("No vocab_list supplied for folding model, assuming the ESM-2 vocabulary!")
                self.vocab_list = get_default_vocab_list()
            else:
                self.vocab_list = vocab_list
        else:
            self.esmfold_config = None
            self.vocab_list = None
        if self.esmfold_config is not None and getattr(self.esmfold_config, "use_esm_attn_map", False):
            raise ValueError("The HuggingFace port of ESMFold does not support use_esm_attn_map at this time!")
    def to_dict(self):
        output = super().to_dict()
        if isinstance(self.esmfold_config, EsmFoldConfig):
            output["esmfold_config"] = self.esmfold_config.to_dict()
        return output

@dataclass
class EsmFoldConfig:
    esm_type: str = None
    fp16_esm: bool = True
    use_esm_attn_map: bool = False
    esm_ablate_pairwise: bool = False
    esm_ablate_sequence: bool = False
    esm_input_dropout: float = 0
    embed_aa: bool = True
    bypass_lm: bool = False
    lddt_head_hid_dim: int = 128
    trunk: "TrunkConfig" = None
    def __post_init__(self):
        if self.trunk is None:
            self.trunk = TrunkConfig()
        elif isinstance(self.trunk, dict):
            self.trunk = TrunkConfig(**self.trunk)
    def to_dict(self):
        output = asdict(self)
        output["trunk"] = self.trunk.to_dict()
        return output

@dataclass
class TrunkConfig:
    num_blocks: int = 48
    sequence_state_dim: int = 1024
    pairwise_state_dim: int = 128
    sequence_head_width: int = 32
    pairwise_head_width: int = 32
    position_bins: int = 32
    dropout: float = 0
    layer_drop: float = 0
    cpu_grad_checkpoint: bool = False
    max_recycles: int = 4
    chunk_size: Optional[int] = 128
    structure_module: "StructureModuleConfig" = None
    def __post_init__(self):
        if self.structure_module is None:
            self.structure_module = StructureModuleConfig()
        elif isinstance(self.structure_module, dict):
            self.structure_module = StructureModuleConfig(**self.structure_module)

        if self.max_recycles <= 0:
            raise ValueError(f"`max_recycles` should be positive, got {self.max_recycles}.")
        if self.sequence_state_dim % self.sequence_head_width != 0:
            raise ValueError(
                "`sequence_state_dim` should be a round multiple of `sequence_head_width`, got"
                f" {self.sequence_state_dim} and {self.sequence_head_width}."
            )
        if self.pairwise_state_dim % self.pairwise_head_width != 0:
            raise ValueError(
                "`pairwise_state_dim` should be a round multiple of `pairwise_head_width`, got"
                f" {self.pairwise_state_dim} and {self.pairwise_head_width}."
            )
        sequence_num_heads = self.sequence_state_dim // self.sequence_head_width
        pairwise_num_heads = self.pairwise_state_dim // self.pairwise_head_width
        if self.sequence_state_dim != sequence_num_heads * self.sequence_head_width:
            raise ValueError(
                "`sequence_state_dim` should be equal to `sequence_num_heads * sequence_head_width`, got"
                f" {self.sequence_state_dim} != {sequence_num_heads} * {self.sequence_head_width}."
            )
        if self.pairwise_state_dim != pairwise_num_heads * self.pairwise_head_width:
            raise ValueError(
                "`pairwise_state_dim` should be equal to `pairwise_num_heads * pairwise_head_width`, got"
                f" {self.pairwise_state_dim} != {pairwise_num_heads} * {self.pairwise_head_width}."
            )
        if self.pairwise_state_dim % 2 != 0:
            raise ValueError(f"`pairwise_state_dim` should be even, got {self.pairwise_state_dim}.")
        if self.dropout >= 0.4:
            raise ValueError(f"`dropout` should not be greater than 0.4, got {self.dropout}.")
    def to_dict(self):
        output = asdict(self)
        output["structure_module"] = self.structure_module.to_dict()
        return output
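
# Worked check of the validation above, using the defaults: sequence_state_dim=1024
# with sequence_head_width=32 gives 1024 // 32 = 32 sequence heads, and
# 32 * 32 == 1024, so the config is accepted; pairwise_state_dim=128 with
# pairwise_head_width=32 gives 4 pairwise heads, and 128 is even.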

@dataclass
class StructureModuleConfig:
    sequence_dim: int = 384
    pairwise_dim: int = 128
    ipa_dim: int = 16
    resnet_dim: int = 128
    num_heads_ipa: int = 12
    num_qk_points: int = 4
    num_v_points: int = 8
    dropout_rate: float = 0.1
    num_blocks: int = 8
    num_transition_layers: int = 1
    num_resnet_blocks: int = 2
    num_angles: int = 7
    trans_scale_factor: int = 10
    epsilon: float = 1e-8
    inf: float = 1e5
    def to_dict(self):
        return asdict(self)

def get_default_vocab_list():
    return (
"<cls>",
"<pad>",
"<eos>",
"<unk>",
"L",
"A",
"G",
"V",
"S",
"E",
"R",
"T",
"I",
"D",
"P",
"K",
"Q",
"N",
"F",
"Y",
"M",
"H",
"W",
"C",
"X",
"B",
"U",
"Z",
"O",
".",
"-",
"<null_1>",
"<mask>",
)
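

# Hedged usage sketch (not part of the original module): with is_folding_model=True
# and no esmfold_config, __init__ above builds the default
# EsmFoldConfig -> TrunkConfig -> StructureModuleConfig chain and falls back to
# the 33-token ESM-2 vocabulary returned by get_default_vocab_list().
# config = EsmConfig(vocab_size=33, is_folding_model=True)
# assert len(get_default_vocab_list()) == 33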
"""simple docstring"""
fast27_timesteps = [
9_9_9,
8_0_0,
7_9_9,
6_0_0,
5_9_9,
5_0_0,
4_0_0,
3_9_9,
3_7_7,
3_5_5,
3_3_3,
3_1_1,
2_8_8,
2_6_6,
2_4_4,
2_2_2,
2_0_0,
1_9_9,
1_7_7,
1_5_5,
1_3_3,
1_1_1,
8_8,
6_6,
4_4,
2_2,
0,
]
smart27_timesteps = [
9_9_9,
9_7_6,
9_5_2,
9_2_8,
9_0_5,
8_8_2,
8_5_8,
8_5_7,
8_1_0,
7_6_2,
7_1_5,
7_1_4,
5_7_2,
4_2_9,
4_2_8,
2_8_6,
2_8_5,
2_3_8,
1_9_0,
1_4_3,
1_4_2,
1_1_8,
9_5,
7_1,
4_7,
2_4,
0,
]
smart50_timesteps = [
9_9_9,
9_8_8,
9_7_7,
9_6_6,
9_5_5,
9_4_4,
9_3_3,
9_2_2,
9_1_1,
9_0_0,
8_9_9,
8_7_9,
8_5_9,
8_4_0,
8_2_0,
8_0_0,
7_9_9,
7_6_6,
7_3_3,
7_0_0,
6_9_9,
6_5_0,
6_0_0,
5_9_9,
5_0_0,
4_9_9,
4_0_0,
3_9_9,
3_5_0,
3_0_0,
2_9_9,
2_6_6,
2_3_3,
2_0_0,
1_9_9,
1_7_9,
1_5_9,
1_4_0,
1_2_0,
1_0_0,
9_9,
8_8,
7_7,
6_6,
5_5,
4_4,
3_3,
2_2,
1_1,
0,
]
smart100_timesteps = [
9_9_9,
9_9_5,
9_9_2,
9_8_9,
9_8_5,
9_8_1,
9_7_8,
9_7_5,
9_7_1,
9_6_7,
9_6_4,
9_6_1,
9_5_7,
9_5_6,
9_5_1,
9_4_7,
9_4_2,
9_3_7,
9_3_3,
9_2_8,
9_2_3,
9_1_9,
9_1_4,
9_1_3,
9_0_8,
9_0_3,
8_9_7,
8_9_2,
8_8_7,
8_8_1,
8_7_6,
8_7_1,
8_7_0,
8_6_4,
8_5_8,
8_5_2,
8_4_6,
8_4_0,
8_3_4,
8_2_8,
8_2_7,
8_2_0,
8_1_3,
8_0_6,
7_9_9,
7_9_2,
7_8_5,
7_8_4,
7_7_7,
7_7_0,
7_6_3,
7_5_6,
7_4_9,
7_4_2,
7_4_1,
7_3_3,
7_2_4,
7_1_6,
7_0_7,
6_9_9,
6_9_8,
6_8_8,
6_7_7,
6_6_6,
6_5_6,
6_5_5,
6_4_5,
6_3_4,
6_2_3,
6_1_3,
6_1_2,
5_9_8,
5_8_4,
5_7_0,
5_6_9,
5_5_5,
5_4_1,
5_2_7,
5_2_6,
5_0_5,
4_8_4,
4_8_3,
4_6_2,
4_4_0,
4_3_9,
3_9_6,
3_9_5,
3_5_2,
3_5_1,
3_0_8,
3_0_7,
2_6_4,
2_6_3,
2_2_0,
2_1_9,
1_7_6,
1_3_2,
8_8,
4_4,
0,
]
smart185_timesteps = [
9_9_9,
9_9_7,
9_9_5,
9_9_2,
9_9_0,
9_8_8,
9_8_6,
9_8_4,
9_8_1,
9_7_9,
9_7_7,
9_7_5,
9_7_2,
9_7_0,
9_6_8,
9_6_6,
9_6_4,
9_6_1,
9_5_9,
9_5_7,
9_5_6,
9_5_4,
9_5_1,
9_4_9,
9_4_6,
9_4_4,
9_4_1,
9_3_9,
9_3_6,
9_3_4,
9_3_1,
9_2_9,
9_2_6,
9_2_4,
9_2_1,
9_1_9,
9_1_6,
9_1_4,
9_1_3,
9_1_0,
9_0_7,
9_0_5,
9_0_2,
8_9_9,
8_9_6,
8_9_3,
8_9_1,
8_8_8,
8_8_5,
8_8_2,
8_7_9,
8_7_7,
8_7_4,
8_7_1,
8_7_0,
8_6_7,
8_6_4,
8_6_1,
8_5_8,
8_5_5,
8_5_2,
8_4_9,
8_4_6,
8_4_3,
8_4_0,
8_3_7,
8_3_4,
8_3_1,
8_2_8,
8_2_7,
8_2_4,
8_2_1,
8_1_7,
8_1_4,
8_1_1,
8_0_8,
8_0_4,
8_0_1,
7_9_8,
7_9_5,
7_9_1,
7_8_8,
7_8_5,
7_8_4,
7_8_0,
7_7_7,
7_7_4,
7_7_0,
7_6_6,
7_6_3,
7_6_0,
7_5_6,
7_5_2,
7_4_9,
7_4_6,
7_4_2,
7_4_1,
7_3_7,
7_3_3,
7_3_0,
7_2_6,
7_2_2,
7_1_8,
7_1_4,
7_1_0,
7_0_7,
7_0_3,
6_9_9,
6_9_8,
6_9_4,
6_9_0,
6_8_5,
6_8_1,
6_7_7,
6_7_3,
6_6_9,
6_6_4,
6_6_0,
6_5_6,
6_5_5,
6_5_0,
6_4_6,
6_4_1,
6_3_6,
6_3_2,
6_2_7,
6_2_2,
6_1_8,
6_1_3,
6_1_2,
6_0_7,
6_0_2,
5_9_6,
5_9_1,
5_8_6,
5_8_0,
5_7_5,
5_7_0,
5_6_9,
5_6_3,
5_5_7,
5_5_1,
5_4_5,
5_3_9,
5_3_3,
5_2_7,
5_2_6,
5_1_9,
5_1_2,
5_0_5,
4_9_8,
4_9_1,
4_8_4,
4_8_3,
4_7_4,
4_6_6,
4_5_7,
4_4_9,
4_4_0,
4_3_9,
4_2_8,
4_1_8,
4_0_7,
3_9_6,
3_9_5,
3_8_1,
3_6_6,
3_5_2,
3_5_1,
3_3_0,
3_0_8,
3_0_7,
2_8_6,
2_6_4,
2_6_3,
2_4_2,
2_2_0,
2_1_9,
1_7_6,
1_7_5,
1_3_2,
1_3_1,
8_8,
4_4,
0,
]
super27_timesteps = [
9_9_9,
9_9_1,
9_8_2,
9_7_4,
9_6_6,
9_5_8,
9_5_0,
9_4_1,
9_3_3,
9_2_5,
9_1_6,
9_0_8,
9_0_0,
8_9_9,
8_7_4,
8_5_0,
8_2_5,
8_0_0,
7_9_9,
7_0_0,
6_0_0,
5_0_0,
4_0_0,
3_0_0,
2_0_0,
1_0_0,
0,
]
super40_timesteps = [
9_9_9,
9_9_2,
9_8_5,
9_7_8,
9_7_1,
9_6_4,
9_5_7,
9_4_9,
9_4_2,
9_3_5,
9_2_8,
9_2_1,
9_1_4,
9_0_7,
9_0_0,
8_9_9,
8_7_9,
8_5_9,
8_4_0,
8_2_0,
8_0_0,
7_9_9,
7_6_6,
7_3_3,
7_0_0,
6_9_9,
6_5_0,
6_0_0,
5_9_9,
5_0_0,
4_9_9,
4_0_0,
3_9_9,
3_0_0,
2_9_9,
2_0_0,
1_9_9,
1_0_0,
9_9,
0,
]
super100_timesteps = [
9_9_9,
9_9_6,
9_9_2,
9_8_9,
9_8_5,
9_8_2,
9_7_9,
9_7_5,
9_7_2,
9_6_8,
9_6_5,
9_6_1,
9_5_8,
9_5_5,
9_5_1,
9_4_8,
9_4_4,
9_4_1,
9_3_8,
9_3_4,
9_3_1,
9_2_7,
9_2_4,
9_2_0,
9_1_7,
9_1_4,
9_1_0,
9_0_7,
9_0_3,
9_0_0,
8_9_9,
8_9_1,
8_8_4,
8_7_6,
8_6_9,
8_6_1,
8_5_3,
8_4_6,
8_3_8,
8_3_0,
8_2_3,
8_1_5,
8_0_8,
8_0_0,
7_9_9,
7_8_8,
7_7_7,
7_6_6,
7_5_5,
7_4_4,
7_3_3,
7_2_2,
7_1_1,
7_0_0,
6_9_9,
6_8_8,
6_7_7,
6_6_6,
6_5_5,
6_4_4,
6_3_3,
6_2_2,
6_1_1,
6_0_0,
5_9_9,
5_8_5,
5_7_1,
5_5_7,
5_4_2,
5_2_8,
5_1_4,
5_0_0,
4_9_9,
4_8_5,
4_7_1,
4_5_7,
4_4_2,
4_2_8,
4_1_4,
4_0_0,
3_9_9,
3_7_9,
3_5_9,
3_4_0,
3_2_0,
3_0_0,
2_9_9,
2_7_9,
2_5_9,
2_4_0,
2_2_0,
2_0_0,
1_9_9,
1_6_6,
1_3_3,
1_0_0,
9_9,
6_6,
3_3,
0,
]
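
# The lists above are fixed inference timestep schedules, each strictly
# decreasing from 999 to 0 at a different density ("fast"/"smart"/"super"
# naming follows the DeepFloyd-IF convention). A hypothetical consumer
# (assumed, diffusers-style API; not verified here) might pass one directly:
# pipe(prompt, timesteps=fast27_timesteps)  # sketch only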
"""simple docstring"""
import logging
from pathlib import Path
import numpy as np
import pytorch_lightning as pl
import torch
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from pytorch_lightning.utilities import rank_zero_only
from utils_rag import save_json
def count_trainable_parameters(model):
    model_parameters = filter(lambda p: p.requires_grad, model.parameters())
    params = sum([np.prod(p.size()) for p in model_parameters])
    return params
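
# Worked example (hypothetical model): torch.nn.Linear(10, 10) has
# 10 * 10 weights + 10 biases = 110 trainable parameters, so
# count_trainable_parameters(torch.nn.Linear(10, 10)) == 110.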
logger = logging.getLogger(__name__)

def get_checkpoint_callback(output_dir, metric):
    """Saves the best model by validation metric through the trainer."""
    if metric == "rouge2":
        exp = "{val_avg_rouge2:.4f}-{step_count}"
    elif metric == "bleu":
        exp = "{val_avg_bleu:.4f}-{step_count}"
    elif metric == "em":
        exp = "{val_avg_em:.4f}-{step_count}"
    elif metric == "loss":
        exp = "{val_avg_loss:.4f}-{step_count}"
    else:
        raise NotImplementedError(
            f"seq2seq callbacks only support rouge2 and bleu, got {metric}, You can make your own by adding to this"
            " function."
        )
    checkpoint_callback = ModelCheckpoint(
        dirpath=output_dir,
        filename=exp,
        monitor=f"val_{metric}",
        mode="max",
        save_top_k=1,
        every_n_epochs=1,
    )
    return checkpoint_callback

def get_early_stopping_callback(metric, patience):
    return EarlyStopping(
        monitor=f"val_{metric}",
        mode="min" if "loss" in metric else "max",
        patience=patience,
        verbose=True,
    )
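
# Minimal wiring sketch (assumed output path and metric; not from the original file):
# trainer = pl.Trainer(
#     callbacks=[
#         get_checkpoint_callback("output_dir/", "rouge2"),
#         get_early_stopping_callback("rouge2", patience=3),
#     ]
# )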

class Seq2SeqLoggingCallback(pl.Callback):
    def on_batch_end(self, trainer, pl_module):
        lrs = {f"lr_group_{i}": param["lr"] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups)}
        pl_module.logger.log_metrics(lrs)

    @rank_zero_only
    def _write_logs(self, trainer, pl_module, type_path, save_generations=True):
        logger.info(f"***** {type_path} results at step {trainer.global_step:05d} *****")
        metrics = trainer.callback_metrics
        trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ["log", "progress_bar", "preds"]})
        # Log results
        od = Path(pl_module.hparams.output_dir)
        if type_path == "test":
            results_file = od / "test_results.txt"
            generations_file = od / "test_generations.txt"
        else:
            # this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json
            # If people want this it will be easy enough to add back.
            results_file = od / f"{type_path}_results/{trainer.global_step:05d}.txt"
            generations_file = od / f"{type_path}_generations/{trainer.global_step:05d}.txt"
        results_file.parent.mkdir(exist_ok=True)
        generations_file.parent.mkdir(exist_ok=True)
        with open(results_file, "a+") as writer:
            for key in sorted(metrics):
                if key in ["log", "progress_bar", "preds"]:
                    continue
                val = metrics[key]
                if isinstance(val, torch.Tensor):
                    val = val.item()
                msg = f"{key}: {val:.6f}\n"
                writer.write(msg)
        if not save_generations:
            return
        if "preds" in metrics:
            content = "\n".join(metrics["preds"])
            generations_file.open("w+").write(content)

    @rank_zero_only
    def on_train_start(self, trainer, pl_module):
        try:
            npars = pl_module.model.model.num_parameters()
        except AttributeError:
            npars = pl_module.model.num_parameters()
        n_trainable_pars = count_trainable_parameters(pl_module)
        # mp stands for million parameters
        trainer.logger.log_metrics({"n_params": npars, "mp": npars / 1e6, "grad_mp": n_trainable_pars / 1e6})

    @rank_zero_only
    def on_test_end(self, trainer, pl_module):
        save_json(pl_module.metrics, pl_module.metrics_save_path)
        return self._write_logs(trainer, pl_module, "test")

    @rank_zero_only
    def on_validation_end(self, trainer, pl_module):
        save_json(pl_module.metrics, pl_module.metrics_save_path)
        # Uncommenting this will save val generations
        # return self._write_logs(trainer, pl_module, "valid")
"""simple docstring"""
from __future__ import annotations

def allocation_num(number_of_bytes: int, partitions: int) -> list[str]:
    """
    Divide `number_of_bytes` into `partitions` contiguous byte ranges.

    >>> allocation_num(16, 4)
    ['1-4', '5-8', '9-12', '13-16']
    """
    if partitions <= 0:
        raise ValueError("partitions must be a positive number!")
    if partitions > number_of_bytes:
        raise ValueError("partitions can not > number_of_bytes!")
    bytes_per_partition = number_of_bytes // partitions
    allocation_list = []
    for i in range(partitions):
        start_bytes = i * bytes_per_partition + 1
        end_bytes = (
            number_of_bytes if i == partitions - 1 else (i + 1) * bytes_per_partition
        )
        allocation_list.append(f"{start_bytes}-{end_bytes}")
return allocation_list
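
# Worked example of the remainder behavior: the last partition absorbs whatever
# is left over, e.g. allocation_num(10, 3) -> ['1-3', '4-6', '7-10'].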
if __name__ == "__main__":
import doctest
doctest.testmod()
"""simple docstring"""
from typing import Any

def viterbi(
    observations_space: list,
    states_space: list,
    initial_probabilities: dict,
    transition_probabilities: dict,
    emission_probabilities: dict,
) -> list:
    """Return the most likely sequence of hidden states for the observations."""
    _validation(
        observations_space,
        states_space,
        initial_probabilities,
        transition_probabilities,
        emission_probabilities,
    )
    # Creates data structures and fill initial step
    probabilities = {}
    pointers = {}
    for state in states_space:
        observation = observations_space[0]
        probabilities[(state, observation)] = (
            initial_probabilities[state] * emission_probabilities[state][observation]
        )
        pointers[(state, observation)] = None

    # Fills the data structure with the probabilities of
    # different transitions and pointers to previous states
    for o in range(1, len(observations_space)):
        observation = observations_space[o]
        prior_observation = observations_space[o - 1]
        for state in states_space:
            # Calculates the argmax for probability function
            arg_max = ""
            max_probability = -1
            for k_state in states_space:
                probability = (
                    probabilities[(k_state, prior_observation)]
                    * transition_probabilities[k_state][state]
                    * emission_probabilities[state][observation]
                )
                if probability > max_probability:
                    max_probability = probability
                    arg_max = k_state

            # Update probabilities and pointers dicts
            probabilities[(state, observation)] = (
                probabilities[(arg_max, prior_observation)]
                * transition_probabilities[arg_max][state]
                * emission_probabilities[state][observation]
            )
            pointers[(state, observation)] = arg_max

    # The final observation
    final_observation = observations_space[len(observations_space) - 1]

    # argmax for given final observation
    arg_max = ""
    max_probability = -1
    for k_state in states_space:
        probability = probabilities[(k_state, final_observation)]
        if probability > max_probability:
            max_probability = probability
            arg_max = k_state
    last_state = arg_max

    # Process pointers backwards
    previous = last_state
    result = []
    for o in range(len(observations_space) - 1, -1, -1):
        result.append(previous)
        previous = pointers[previous, observations_space[o]]
result.reverse()
return result
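
# Tiny worked example (made-up numbers, the classic healthy/fever HMM):
# viterbi(
#     ["normal", "cold", "dizzy"],
#     ["healthy", "fever"],
#     {"healthy": 0.6, "fever": 0.4},
#     {"healthy": {"healthy": 0.7, "fever": 0.3},
#      "fever": {"healthy": 0.4, "fever": 0.6}},
#     {"healthy": {"normal": 0.5, "cold": 0.4, "dizzy": 0.1},
#      "fever": {"normal": 0.1, "cold": 0.3, "dizzy": 0.6}},
# )  # -> ["healthy", "healthy", "fever"]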

def _validation(
    observations_space: Any,
    states_space: Any,
    initial_probabilities: Any,
    transition_probabilities: Any,
    emission_probabilities: Any,
) -> None:
    _validate_not_empty(
        observations_space,
        states_space,
        initial_probabilities,
        transition_probabilities,
        emission_probabilities,
    )
    _validate_lists(observations_space, states_space)
    _validate_dicts(
        initial_probabilities, transition_probabilities, emission_probabilities
    )


def _validate_not_empty(
    observations_space: Any,
    states_space: Any,
    initial_probabilities: Any,
    transition_probabilities: Any,
    emission_probabilities: Any,
) -> None:
    if not all(
        [
            observations_space,
            states_space,
            initial_probabilities,
            transition_probabilities,
            emission_probabilities,
        ]
    ):
        raise ValueError("There's an empty parameter")


def _validate_lists(observations_space: Any, states_space: Any) -> None:
    _validate_list(observations_space, "observations_space")
    _validate_list(states_space, "states_space")


def _validate_list(_object: Any, var_name: str) -> None:
    if not isinstance(_object, list):
        msg = f"{var_name} must be a list"
        raise ValueError(msg)
    else:
        for x in _object:
            if not isinstance(x, str):
                msg = f"{var_name} must be a list of strings"
                raise ValueError(msg)


def _validate_dicts(
    initial_probabilities: Any,
    transition_probabilities: Any,
    emission_probabilities: Any,
) -> None:
    _validate_dict(initial_probabilities, "initial_probabilities", float)
    _validate_nested_dict(transition_probabilities, "transition_probabilities")
    _validate_nested_dict(emission_probabilities, "emission_probabilities")


def _validate_nested_dict(_object: Any, var_name: str) -> None:
    _validate_dict(_object, var_name, dict)
    for x in _object.values():
        _validate_dict(x, var_name, float, True)


def _validate_dict(
    _object: Any, var_name: str, value_type: type, nested: bool = False
) -> None:
    if not isinstance(_object, dict):
        msg = f"{var_name} must be a dict"
        raise ValueError(msg)
    if not all(isinstance(x, str) for x in _object):
        msg = f"{var_name} all keys must be strings"
        raise ValueError(msg)
    if not all(isinstance(x, value_type) for x in _object.values()):
        nested_text = "nested dictionary " if nested else ""
        msg = f"{var_name} {nested_text}all values must be {value_type.__name__}"
        raise ValueError(msg)
if __name__ == "__main__":
from doctest import testmod
testmod()
"""simple docstring"""
from collections import deque

class Process:
    def __init__(self, process_name, arrival_time, burst_time):
        self.process_name = process_name  # process name
        self.arrival_time = arrival_time  # arrival time of the process
        # completion time of finished process or last interrupted time
        self.stop_time = arrival_time
        self.burst_time = burst_time  # remaining burst time
        self.waiting_time = 0  # total time of the process wait in ready queue
        self.turnaround_time = 0  # time from arrival time to completion time


class MLFQ:
    def __init__(self, number_of_queues, time_slices, queue, current_time):
        self.number_of_queues = number_of_queues
        # time slice of queues that round robin algorithm applied
        self.time_slices = time_slices
        # unfinished process is in this ready_queue
        self.ready_queue = queue
        # current time
        self.current_time = current_time
        # finished process is in this sequence queue
        self.finish_queue = deque()

    def calculate_sequence_of_finish_queue(self):
        """Names of the finished processes, in completion order."""
        sequence = []
        for i in range(len(self.finish_queue)):
            sequence.append(self.finish_queue[i].process_name)
        return sequence

    def calculate_waiting_time(self, queue):
        """Waiting time of each process in the given queue."""
        waiting_times = []
        for i in range(len(queue)):
            waiting_times.append(queue[i].waiting_time)
        return waiting_times

    def calculate_turnaround_time(self, queue):
        """Turnaround time of each process in the given queue."""
        turnaround_times = []
        for i in range(len(queue)):
            turnaround_times.append(queue[i].turnaround_time)
        return turnaround_times

    def calculate_completion_time(self, queue):
        """Completion time of each process in the given queue."""
        completion_times = []
        for i in range(len(queue)):
            completion_times.append(queue[i].stop_time)
        return completion_times

    def calculate_remaining_burst_time_of_processes(self, queue):
        """Remaining burst time of each process in the given queue."""
        return [q.burst_time for q in queue]

    def update_waiting_time(self, process):
        """Add the time the process sat in the ready queue since it last ran."""
        process.waiting_time += self.current_time - process.stop_time
        return process.waiting_time

    def first_come_first_served(self, ready_queue):
        """FCFS: run every remaining process to completion, in queue order."""
        finished = deque()  # sequence deque of finished process
        while len(ready_queue) != 0:
            cp = ready_queue.popleft()  # current process
            # if process's arrival time is later than current time, update current time
            if self.current_time < cp.arrival_time:
                self.current_time = cp.arrival_time
            # update waiting time of current process
            self.update_waiting_time(cp)
            # update current time
            self.current_time += cp.burst_time
            # finish the process and set the process's burst-time 0
            cp.burst_time = 0
            # set the process's turnaround time because it is finished
            cp.turnaround_time = self.current_time - cp.arrival_time
            # set the completion time
            cp.stop_time = self.current_time
            # add the process to queue that has finished queue
            finished.append(cp)
        self.finish_queue.extend(finished)  # add finished process to finish queue
        # FCFS will finish all remaining processes
        return finished

    def round_robin(self, ready_queue, time_slice):
        """Run one round-robin cycle; unfinished processes go back to the queue."""
        finished = deque()  # sequence deque of terminated process
        # just for 1 cycle and unfinished processes will go back to queue
        for _ in range(len(ready_queue)):
            cp = ready_queue.popleft()  # current process
            # if process's arrival time is later than current time, update current time
            if self.current_time < cp.arrival_time:
                self.current_time = cp.arrival_time
            # update waiting time of unfinished processes
            self.update_waiting_time(cp)
            # if the burst time of process is bigger than time-slice
            if cp.burst_time > time_slice:
                # use CPU for only time-slice
                self.current_time += time_slice
                # update remaining burst time
                cp.burst_time -= time_slice
                # update end point time
                cp.stop_time = self.current_time
                # locate the process behind the queue because it is not finished
                ready_queue.append(cp)
            else:
                # use CPU for remaining burst time
                self.current_time += cp.burst_time
                # set burst time 0 because the process is finished
                cp.burst_time = 0
                # set the finish time
                cp.stop_time = self.current_time
                # update the process' turnaround time because it is finished
                cp.turnaround_time = self.current_time - cp.arrival_time
                # add the process to queue that has finished queue
                finished.append(cp)
        self.finish_queue.extend(finished)  # add finished process to finish queue
        # return finished processes queue and remaining processes queue
        return finished, ready_queue

    def multi_level_feedback_queue(self):
        """Round robin on every queue but the last, then FCFS on the last one."""
        # all queues except the last one run the round robin algorithm
        for i in range(self.number_of_queues - 1):
            finished, self.ready_queue = self.round_robin(
                self.ready_queue, self.time_slices[i]
            )
        # the last queue has first_come_first_served algorithm
        self.first_come_first_served(self.ready_queue)
        return self.finish_queue
if __name__ == "__main__":
import doctest
lowerCAmelCase_: Optional[int] = Process("P1", 0, 5_3)
lowerCAmelCase_: Union[str, Any] = Process("P2", 0, 1_7)
lowerCAmelCase_: str = Process("P3", 0, 6_8)
lowerCAmelCase_: int = Process("P4", 0, 2_4)
lowerCAmelCase_: Dict = 3
lowerCAmelCase_: Any = [1_7, 2_5]
lowerCAmelCase_: Tuple = deque([Pa, Pa, Pa, Pa])
if len(time_slices) != number_of_queues - 1:
raise SystemExit(0)
doctest.testmod(extraglobs={"queue": deque([Pa, Pa, Pa, Pa])})
lowerCAmelCase_: Any = Process("P1", 0, 5_3)
lowerCAmelCase_: Tuple = Process("P2", 0, 1_7)
lowerCAmelCase_: Optional[int] = Process("P3", 0, 6_8)
lowerCAmelCase_: List[Any] = Process("P4", 0, 2_4)
lowerCAmelCase_: Union[str, Any] = 3
lowerCAmelCase_: Any = [1_7, 2_5]
lowerCAmelCase_: Optional[Any] = deque([Pa, Pa, Pa, Pa])
lowerCAmelCase_: Union[str, Any] = MLFQ(number_of_queues, time_slices, queue, 0)
lowerCAmelCase_: Tuple = mlfq.multi_level_feedback_queue()
# print total waiting times of processes(P1, P2, P3, P4)
print(
F'waiting time:\
\t\t\t{MLFQ.calculate_waiting_time(mlfq, [Pa, Pa, Pa, Pa])}'
)
# print completion times of processes(P1, P2, P3, P4)
print(
F'completion time:\
\t\t{MLFQ.calculate_completion_time(mlfq, [Pa, Pa, Pa, Pa])}'
)
# print total turnaround times of processes(P1, P2, P3, P4)
print(
F'turnaround time:\
\t\t{MLFQ.calculate_turnaround_time(mlfq, [Pa, Pa, Pa, Pa])}'
)
# print sequence of finished processes
print(
F'sequence of finished processes:\
{mlfq.calculate_sequence_of_finish_queue()}'
)
"""simple docstring"""
from collections import OrderedDict
from typing import Any, List, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import logging
logger = logging.get_logger(__name__)

CODEGEN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"Salesforce/codegen-350M-nl": "https://huggingface.co/Salesforce/codegen-350M-nl/resolve/main/config.json",
"Salesforce/codegen-350M-multi": "https://huggingface.co/Salesforce/codegen-350M-multi/resolve/main/config.json",
"Salesforce/codegen-350M-mono": "https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/config.json",
"Salesforce/codegen-2B-nl": "https://huggingface.co/Salesforce/codegen-2B-nl/resolve/main/config.json",
"Salesforce/codegen-2B-multi": "https://huggingface.co/Salesforce/codegen-2B-multi/resolve/main/config.json",
"Salesforce/codegen-2B-mono": "https://huggingface.co/Salesforce/codegen-2B-mono/resolve/main/config.json",
"Salesforce/codegen-6B-nl": "https://huggingface.co/Salesforce/codegen-6B-nl/resolve/main/config.json",
"Salesforce/codegen-6B-multi": "https://huggingface.co/Salesforce/codegen-6B-multi/resolve/main/config.json",
"Salesforce/codegen-6B-mono": "https://huggingface.co/Salesforce/codegen-6B-mono/resolve/main/config.json",
"Salesforce/codegen-16B-nl": "https://huggingface.co/Salesforce/codegen-16B-nl/resolve/main/config.json",
"Salesforce/codegen-16B-multi": "https://huggingface.co/Salesforce/codegen-16B-multi/resolve/main/config.json",
"Salesforce/codegen-16B-mono": "https://huggingface.co/Salesforce/codegen-16B-mono/resolve/main/config.json",
}


class CodeGenConfig(PretrainedConfig):
    model_type = "codegen"
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=50400,
        n_positions=2048,
        n_ctx=2048,
        n_embd=4096,
        n_layer=28,
        n_head=16,
        rotary_dim=64,
        n_inner=None,
        activation_function="gelu_new",
        resid_pdrop=0.0,
        embd_pdrop=0.0,
        attn_pdrop=0.0,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        use_cache=True,
        bos_token_id=50256,
        eos_token_id=50256,
        tie_word_embeddings=False,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_ctx = n_ctx
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.rotary_dim = rotary_dim
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        super().__init__(
            bos_token_id=bos_token_id, eos_token_id=eos_token_id, tie_word_embeddings=tie_word_embeddings, **kwargs
        )
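
# Hedged usage sketch (not in the original file): a deliberately tiny config,
# e.g. for tests. The attribute_map above lets generic code read
# config.hidden_size as an alias of config.n_embd.
# tiny = CodeGenConfig(n_layer=2, n_head=4, n_embd=256, rotary_dim=16)
# assert tiny.hidden_size == 256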


class CodeGenOnnxConfig(OnnxConfigWithPast):
    def __init__(
        self,
        config: PretrainedConfig,
        task: str = "default",
        patching_specs: List[PatchingSpec] = None,
        use_past: bool = False,
    ):
        super().__init__(config, task=task, patching_specs=patching_specs, use_past=use_past)
        if not getattr(self._config, "pad_token_id", None):
            # TODO: how to do that better?
            self._config.pad_token_id = 0

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}})
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")
            common_inputs["attention_mask"] = {0: "batch", 1: "past_sequence + sequence"}
        else:
            common_inputs["attention_mask"] = {0: "batch", 1: "sequence"}
        return common_inputs

    @property
    def num_layers(self) -> int:
        return self._config.n_layer

    @property
    def num_attention_heads(self) -> int:
        return self._config.n_head

    def generate_dummy_inputs(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        common_inputs = super(OnnxConfigWithPast, self).generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )
        # We need to order the input in the way they appears in the forward()
        ordered_inputs = OrderedDict({"input_ids": common_inputs["input_ids"]})
        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch

                batch, seqlen = common_inputs["input_ids"].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                past_shape = (
                    batch,
                    self.num_attention_heads,
                    past_key_values_length,
                    self._config.hidden_size // self.num_attention_heads,
                )
                ordered_inputs["past_key_values"] = [
                    (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(self.num_layers)
                ]
        ordered_inputs["attention_mask"] = common_inputs["attention_mask"]
        if self.use_past:
            mask_dtype = ordered_inputs["attention_mask"].dtype
            ordered_inputs["attention_mask"] = torch.cat(
                [ordered_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )
        return ordered_inputs

    @property
    def default_onnx_opset(self) -> int:
        return 13
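
# Sketch of typical consumption (assumed workflow, mirroring transformers.onnx
# conventions; not from the original file):
# onnx_config = CodeGenOnnxConfig(config, use_past=True)
# dummy = onnx_config.generate_dummy_inputs(
#     tokenizer, batch_size=2, seq_length=8, framework=TensorType.PYTORCH
# )  # yields input_ids, past_key_values, and a mask widened by seq_length + 2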
"""simple docstring"""
import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple
from transformers import AddedToken, BatchEncoding, ByTaTokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
if is_torch_available():
    FRAMEWORK = "pt"
elif is_tf_available():
    FRAMEWORK = "tf"
else:
    FRAMEWORK = "jax"
class a__ ( _a , unittest.TestCase ):
snake_case_ = ByTaTokenizer
snake_case_ = False
def snake_case__ ( self ):
'''simple docstring'''
super().setUp()
lowercase__ = ByTaTokenizer()
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def snake_case__ ( self ):
'''simple docstring'''
return ByTaTokenizer.from_pretrained("google/byt5-small" )
def snake_case__ ( self, **_UpperCAmelCase ):
'''simple docstring'''
return self.tokenizer_class.from_pretrained(self.tmpdirname, **_UpperCAmelCase )
def snake_case__ ( self, _UpperCAmelCase, _UpperCAmelCase=False, _UpperCAmelCase=20, _UpperCAmelCase=5 ):
'''simple docstring'''
lowercase__ = []
for i in range(len(_UpperCAmelCase ) ):
try:
lowercase__ = tokenizer.decode([i], clean_up_tokenization_spaces=_UpperCAmelCase )
except UnicodeDecodeError:
pass
toks.append((i, tok) )
lowercase__ = list(filter(lambda _UpperCAmelCase : re.match(R"^[ a-zA-Z]+$", t[1] ), _UpperCAmelCase ) )
lowercase__ = list(filter(lambda _UpperCAmelCase : [t[0]] == tokenizer.encode(t[1], add_special_tokens=_UpperCAmelCase ), _UpperCAmelCase ) )
if max_length is not None and len(_UpperCAmelCase ) > max_length:
lowercase__ = toks[:max_length]
if min_length is not None and len(_UpperCAmelCase ) < min_length and len(_UpperCAmelCase ) > 0:
while len(_UpperCAmelCase ) < min_length:
lowercase__ = toks + toks
# toks_str = [t[1] for t in toks]
lowercase__ = [t[0] for t in toks]
# Ensure consistency
lowercase__ = tokenizer.decode(_UpperCAmelCase, clean_up_tokenization_spaces=_UpperCAmelCase )
if " " not in output_txt and len(_UpperCAmelCase ) > 1:
lowercase__ = (
tokenizer.decode([toks_ids[0]], clean_up_tokenization_spaces=_UpperCAmelCase )
+ " "
+ tokenizer.decode(toks_ids[1:], clean_up_tokenization_spaces=_UpperCAmelCase )
)
if with_prefix_space:
lowercase__ = " " + output_txt
lowercase__ = tokenizer.encode(_UpperCAmelCase, add_special_tokens=_UpperCAmelCase )
return output_txt, output_ids
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = self.ta_base_tokenizer
lowercase__ = tokenizer(["hi</s>", "I went to the gym</s>", "</s>"] )
lowercase__ = tokenizer(["hi", "I went to the gym", ""] )
self.assertListEqual(batch_with_eos_added["input_ids"], batch_without_eos_added["input_ids"] )
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = self.ta_base_tokenizer
lowercase__ = "Unicode €."
lowercase__ = tokenizer(_UpperCAmelCase )
lowercase__ = [88, 113, 108, 102, 114, 103, 104, 35, 229, 133, 175, 49, 1]
self.assertEqual(encoded["input_ids"], _UpperCAmelCase )
# decoding
lowercase__ = tokenizer.decode(_UpperCAmelCase )
self.assertEqual(_UpperCAmelCase, "Unicode €.</s>" )
lowercase__ = tokenizer("e è é ê ë" )
lowercase__ = [104, 35, 198, 171, 35, 198, 172, 35, 198, 173, 35, 198, 174, 1]
self.assertEqual(encoded["input_ids"], _UpperCAmelCase )
# decoding
lowercase__ = tokenizer.decode(_UpperCAmelCase )
self.assertEqual(_UpperCAmelCase, "e è é ê ë</s>" )
# encode/decode, but with `encode` instead of `__call__`
self.assertEqual(tokenizer.decode(tokenizer.encode("e è é ê ë" ) ), "e è é ê ë</s>" )
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = self.ta_base_tokenizer
lowercase__ = ["A long paragraph for summarization.", "Another paragraph for summarization."]
# fmt: off
lowercase__ = [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 1, 0]
# fmt: on
lowercase__ = tokenizer(_UpperCAmelCase, padding=_UpperCAmelCase, return_tensors=_UpperCAmelCase )
self.assertIsInstance(_UpperCAmelCase, _UpperCAmelCase )
if FRAMEWORK != "jax":
lowercase__ = list(batch.input_ids.numpy()[0] )
else:
lowercase__ = list(batch.input_ids.tolist()[0] )
self.assertListEqual(_UpperCAmelCase, _UpperCAmelCase )
self.assertEqual((2, 37), batch.input_ids.shape )
self.assertEqual((2, 37), batch.attention_mask.shape )
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = self.ta_base_tokenizer
lowercase__ = ["A long paragraph for summarization.", "Another paragraph for summarization."]
lowercase__ = tokenizer(_UpperCAmelCase, padding=_UpperCAmelCase, return_tensors=_UpperCAmelCase )
# check if input_ids are returned and no decoder_input_ids
self.assertIn("input_ids", _UpperCAmelCase )
self.assertIn("attention_mask", _UpperCAmelCase )
self.assertNotIn("decoder_input_ids", _UpperCAmelCase )
self.assertNotIn("decoder_attention_mask", _UpperCAmelCase )
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = self.ta_base_tokenizer
lowercase__ = [
"Summary of the text.",
"Another summary.",
]
lowercase__ = tokenizer(
text_target=_UpperCAmelCase, max_length=32, padding="max_length", truncation=_UpperCAmelCase, return_tensors=_UpperCAmelCase )
self.assertEqual(32, targets["input_ids"].shape[1] )
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = self.ta_base_tokenizer
lowercase__ = ["A long paragraph for summarization. </s>"]
lowercase__ = ["Summary of the text. </s>"]
# fmt: off
lowercase__ = [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 35, 1]
lowercase__ = [86, 120, 112, 112, 100, 117, 124, 35, 114, 105, 35, 119, 107, 104, 35, 119, 104, 123, 119, 49, 35, 1]
# fmt: on
lowercase__ = tokenizer(_UpperCAmelCase, text_target=_UpperCAmelCase )
self.assertEqual(_UpperCAmelCase, batch["input_ids"][0] )
self.assertEqual(_UpperCAmelCase, batch["labels"][0] )
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
self.assertNotEqual(tokenizer.model_max_length, 42 )
# Now let's start the test
lowercase__ = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
# Isolate this from the other tests because we save additional tokens/etc
lowercase__ = tempfile.mkdtemp()
lowercase__ = " He is very happy, UNwant\u00E9d,running"
lowercase__ = tokenizer.encode(_UpperCAmelCase, add_special_tokens=_UpperCAmelCase )
tokenizer.save_pretrained(_UpperCAmelCase )
lowercase__ = tokenizer.__class__.from_pretrained(_UpperCAmelCase )
lowercase__ = after_tokenizer.encode(_UpperCAmelCase, add_special_tokens=_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase, _UpperCAmelCase )
shutil.rmtree(_UpperCAmelCase )
lowercase__ = self.get_tokenizers(model_max_length=42 )
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
# Isolate this from the other tests because we save additional tokens/etc
lowercase__ = tempfile.mkdtemp()
lowercase__ = " He is very happy, UNwant\u00E9d,running"
tokenizer.add_tokens(["bim", "bambam"] )
lowercase__ = tokenizer.additional_special_tokens
additional_special_tokens.append("new_additional_special_token" )
tokenizer.add_special_tokens({"additional_special_tokens": additional_special_tokens} )
lowercase__ = tokenizer.encode(_UpperCAmelCase, add_special_tokens=_UpperCAmelCase )
tokenizer.save_pretrained(_UpperCAmelCase )
lowercase__ = tokenizer.__class__.from_pretrained(_UpperCAmelCase )
lowercase__ = after_tokenizer.encode(_UpperCAmelCase, add_special_tokens=_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase, _UpperCAmelCase )
self.assertIn("new_additional_special_token", after_tokenizer.additional_special_tokens )
self.assertEqual(after_tokenizer.model_max_length, 42 )
lowercase__ = tokenizer.__class__.from_pretrained(_UpperCAmelCase, model_max_length=43 )
self.assertEqual(tokenizer.model_max_length, 43 )
shutil.rmtree(_UpperCAmelCase )
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(_UpperCAmelCase )
with open(os.path.join(_UpperCAmelCase, "special_tokens_map.json" ), encoding="utf-8" ) as json_file:
lowercase__ = json.load(_UpperCAmelCase )
with open(os.path.join(_UpperCAmelCase, "tokenizer_config.json" ), encoding="utf-8" ) as json_file:
lowercase__ = json.load(_UpperCAmelCase )
lowercase__ = [F'''<extra_id_{i}>''' for i in range(125 )]
lowercase__ = added_tokens_extra_ids + [
"an_additional_special_token"
]
lowercase__ = added_tokens_extra_ids + [
"an_additional_special_token"
]
with open(os.path.join(_UpperCAmelCase, "special_tokens_map.json" ), "w", encoding="utf-8" ) as outfile:
json.dump(_UpperCAmelCase, _UpperCAmelCase )
with open(os.path.join(_UpperCAmelCase, "tokenizer_config.json" ), "w", encoding="utf-8" ) as outfile:
json.dump(_UpperCAmelCase, _UpperCAmelCase )
# the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
# into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
# "special_tokens_map.json" files
lowercase__ = tokenizer_class.from_pretrained(
_UpperCAmelCase, )
self.assertIn(
"an_additional_special_token", tokenizer_without_change_in_init.additional_special_tokens )
# self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab
self.assertEqual(
["an_additional_special_token"], tokenizer_without_change_in_init.convert_ids_to_tokens(
tokenizer_without_change_in_init.convert_tokens_to_ids(["an_additional_special_token"] ) ), )
# Now we test that we can change the value of additional_special_tokens in the from_pretrained
lowercase__ = added_tokens_extra_ids + [AddedToken("a_new_additional_special_token", lstrip=_UpperCAmelCase )]
lowercase__ = tokenizer_class.from_pretrained(
_UpperCAmelCase, additional_special_tokens=_UpperCAmelCase, )
self.assertIn("a_new_additional_special_token", tokenizer.additional_special_tokens )
self.assertEqual(
["a_new_additional_special_token"], tokenizer.convert_ids_to_tokens(
tokenizer.convert_tokens_to_ids(["a_new_additional_special_token"] ) ), )
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(_UpperCAmelCase )
lowercase__ = tokenizer_class.from_pretrained(_UpperCAmelCase )
self.assertTrue(tokenizer.decode([255] ) == "" )
def snake_case__ ( self ):
'''simple docstring'''
pass
def snake_case__ ( self ):
'''simple docstring'''
pass
def snake_case__ ( self ):
'''simple docstring'''
pass
def snake_case__ ( self ):
'''simple docstring'''
pass
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = self.get_tokenizers(fast=_UpperCAmelCase, do_lower_case=_UpperCAmelCase )
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
lowercase__ = ["t", "h", "i", "s", " ", "i", "s", " ", "a", " ", "t", "e", "x", "t", "</s>"]
lowercase__ = tokenizer.convert_tokens_to_string(_UpperCAmelCase )
self.assertIsInstance(_UpperCAmelCase, _UpperCAmelCase )
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
lowercase__ = [
"bos_token",
"eos_token",
"unk_token",
"sep_token",
"pad_token",
"cls_token",
"mask_token",
]
lowercase__ = 0
lowercase__ = tokenizer.convert_ids_to_tokens(
_UpperCAmelCase, skip_special_tokens=_UpperCAmelCase )
for attr in attributes_list:
setattr(_UpperCAmelCase, attr + "_id", _UpperCAmelCase )
self.assertEqual(getattr(_UpperCAmelCase, _UpperCAmelCase ), _UpperCAmelCase )
self.assertEqual(getattr(_UpperCAmelCase, attr + "_id" ), _UpperCAmelCase )
setattr(_UpperCAmelCase, attr + "_id", _UpperCAmelCase )
self.assertEqual(getattr(_UpperCAmelCase, _UpperCAmelCase ), _UpperCAmelCase )
self.assertEqual(getattr(_UpperCAmelCase, attr + "_id" ), _UpperCAmelCase )
setattr(_UpperCAmelCase, "additional_special_tokens_ids", [] )
self.assertListEqual(getattr(_UpperCAmelCase, "additional_special_tokens" ), [] )
self.assertListEqual(getattr(_UpperCAmelCase, "additional_special_tokens_ids" ), [] )
setattr(_UpperCAmelCase, "additional_special_tokens_ids", [token_id_to_test_setters] )
self.assertListEqual(getattr(_UpperCAmelCase, "additional_special_tokens" ), [token_to_test_setters] )
self.assertListEqual(getattr(_UpperCAmelCase, "additional_special_tokens_ids" ), [token_id_to_test_setters] )
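

# Byte-level intuition for the fixtures above: ByT5 maps each UTF-8 byte b to
# token id b + 3 (ids 0-2 are reserved for pad/eos/unk), so "U" (byte 85)
# becomes id 88, the first id in the "Unicode €." expectation.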
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"facebook/nllb-moe-54B": "https://huggingface.co/facebook/nllb-moe-54b/resolve/main/config.json",
}


class NllbMoeConfig(PretrainedConfig):
    model_type = "nllb-moe"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=128112, max_position_embeddings=1024, encoder_layers=12, encoder_ffn_dim=4096,
        encoder_attention_heads=16, decoder_layers=12, decoder_ffn_dim=4096, decoder_attention_heads=16,
        encoder_layerdrop=0.05, decoder_layerdrop=0.05, use_cache=True, is_encoder_decoder=True,
        activation_function="relu", d_model=1024, dropout=0.1, attention_dropout=0.1,
        activation_dropout=0.0, init_std=0.02, decoder_start_token_id=2, scale_embedding=True,
        router_bias=False, router_dtype="float32", router_ignore_padding_tokens=False, num_experts=128,
        expert_capacity=64, encoder_sparse_step=4, decoder_sparse_step=4, router_z_loss_coef=0.001,
        router_aux_loss_coef=0.001, second_expert_policy="all", normalize_router_prob_before_dropping=False,
        batch_prioritized_routing=False, moe_eval_capacity_token_fraction=1.0, moe_token_dropout=0.2,
        pad_token_id=1, bos_token_id=0, eos_token_id=2, output_router_logits=False,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.router_z_loss_coef = router_z_loss_coef
        self.router_aux_loss_coef = router_aux_loss_coef
        self.decoder_sparse_step = decoder_sparse_step
        self.encoder_sparse_step = encoder_sparse_step
        self.num_experts = num_experts
        self.expert_capacity = expert_capacity
        self.router_bias = router_bias
        if router_dtype not in ["float32", "float16", "bfloat16"]:
            raise ValueError(f"`router_dtype` must be one of 'float32', 'float16' or 'bfloat16', got {router_dtype}")
        self.router_dtype = router_dtype
        self.router_ignore_padding_tokens = router_ignore_padding_tokens
        self.batch_prioritized_routing = batch_prioritized_routing
        self.second_expert_policy = second_expert_policy
        self.normalize_router_prob_before_dropping = normalize_router_prob_before_dropping
        self.moe_eval_capacity_token_fraction = moe_eval_capacity_token_fraction
        self.moe_token_dropout = moe_token_dropout
        self.output_router_logits = output_router_logits
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )
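
# Hedged construction sketch (not part of the original file): any router_dtype
# outside {"float32", "float16", "bfloat16"} raises in __init__ above.
# config = NllbMoeConfig(num_experts=8, expert_capacity=32)   # small MoE variant
# NllbMoeConfig(router_dtype="float64")                       # would raise ValueError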
"""simple docstring"""
import unittest
from huggingface_hub import hf_hub_download
from transformers import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING, VideoMAEFeatureExtractor
from transformers.pipelines import VideoClassificationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_decord,
require_tf,
require_torch,
require_torch_or_tf,
require_vision,
)
from .test_pipelines_common import ANY
@is_pipeline_test
@require_torch_or_tf
@require_vision
@require_decord
class a__ ( unittest.TestCase ):
snake_case_ = MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING
def snake_case__ ( self, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase ):
'''simple docstring'''
lowercase__ = hf_hub_download(
repo_id="nateraw/video-demo", filename="archery.mp4", repo_type="dataset" )
lowercase__ = VideoClassificationPipeline(model=_UpperCAmelCase, image_processor=_UpperCAmelCase, top_k=2 )
lowercase__ = [
example_video_filepath,
"https://huggingface.co/datasets/nateraw/video-demo/resolve/main/archery.mp4",
]
return video_classifier, examples
def snake_case__ ( self, _UpperCAmelCase, _UpperCAmelCase ):
'''simple docstring'''
for example in examples:
lowercase__ = video_classifier(_UpperCAmelCase )
self.assertEqual(
_UpperCAmelCase, [
{"score": ANY(_UpperCAmelCase ), "label": ANY(_UpperCAmelCase )},
{"score": ANY(_UpperCAmelCase ), "label": ANY(_UpperCAmelCase )},
], )
@require_torch
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = "hf-internal-testing/tiny-random-VideoMAEForVideoClassification"
lowercase__ = VideoMAEFeatureExtractor(
size={"shortest_edge": 10}, crop_size={"height": 10, "width": 10} )
lowercase__ = pipeline(
"video-classification", model=_UpperCAmelCase, feature_extractor=_UpperCAmelCase, frame_sampling_rate=4 )
lowercase__ = hf_hub_download(repo_id="nateraw/video-demo", filename="archery.mp4", repo_type="dataset" )
lowercase__ = video_classifier(_UpperCAmelCase, top_k=2 )
self.assertEqual(
nested_simplify(_UpperCAmelCase, decimals=4 ), [{"score": 0.5_199, "label": "LABEL_0"}, {"score": 0.4_801, "label": "LABEL_1"}], )
lowercase__ = video_classifier(
[
video_file_path,
video_file_path,
], top_k=2, )
self.assertEqual(
nested_simplify(_UpperCAmelCase, decimals=4 ), [
[{"score": 0.5_199, "label": "LABEL_0"}, {"score": 0.4_801, "label": "LABEL_1"}],
[{"score": 0.5_199, "label": "LABEL_0"}, {"score": 0.4_801, "label": "LABEL_1"}],
], )
@require_tf
def snake_case__ ( self ):
'''simple docstring'''
pass
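
# Quick inference sketch mirroring the test above (model id taken from the
# test; the local video path is a placeholder):
# classifier = pipeline(
#     "video-classification",
#     model="hf-internal-testing/tiny-random-VideoMAEForVideoClassification",
# )
# classifier("archery.mp4", top_k=2)  # -> [{"score": ..., "label": ...}, ...]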
"""simple docstring"""
import collections
import inspect
import unittest
from typing import Dict, List, Tuple
from transformers import MaskFormerSwinConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, torch_device
from transformers.utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MaskFormerSwinBackbone
from transformers.models.maskformer import MaskFormerSwinModel
class a__ :
def __init__( self, _UpperCAmelCase, _UpperCAmelCase=13, _UpperCAmelCase=32, _UpperCAmelCase=2, _UpperCAmelCase=3, _UpperCAmelCase=16, _UpperCAmelCase=[1, 2, 1], _UpperCAmelCase=[2, 2, 4], _UpperCAmelCase=2, _UpperCAmelCase=2.0, _UpperCAmelCase=True, _UpperCAmelCase=0.0, _UpperCAmelCase=0.0, _UpperCAmelCase=0.1, _UpperCAmelCase="gelu", _UpperCAmelCase=False, _UpperCAmelCase=True, _UpperCAmelCase=0.02, _UpperCAmelCase=1E-5, _UpperCAmelCase=True, _UpperCAmelCase=None, _UpperCAmelCase=True, _UpperCAmelCase=10, _UpperCAmelCase=8, _UpperCAmelCase=["stage1", "stage2", "stage3"], _UpperCAmelCase=[1, 2, 3], ):
'''simple docstring'''
lowercase__ = parent
lowercase__ = batch_size
lowercase__ = image_size
lowercase__ = patch_size
lowercase__ = num_channels
lowercase__ = embed_dim
lowercase__ = depths
lowercase__ = num_heads
lowercase__ = window_size
lowercase__ = mlp_ratio
lowercase__ = qkv_bias
lowercase__ = hidden_dropout_prob
lowercase__ = attention_probs_dropout_prob
lowercase__ = drop_path_rate
lowercase__ = hidden_act
lowercase__ = use_absolute_embeddings
lowercase__ = patch_norm
lowercase__ = layer_norm_eps
lowercase__ = initializer_range
lowercase__ = is_training
lowercase__ = scope
lowercase__ = use_labels
lowercase__ = type_sequence_label_size
lowercase__ = encoder_stride
lowercase__ = out_features
lowercase__ = out_indices
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowercase__ = None
if self.use_labels:
lowercase__ = ids_tensor([self.batch_size], self.type_sequence_label_size )
lowercase__ = self.get_config()
return config, pixel_values, labels
def snake_case__ ( self ):
'''simple docstring'''
return MaskFormerSwinConfig(
image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, embed_dim=self.embed_dim, depths=self.depths, num_heads=self.num_heads, window_size=self.window_size, mlp_ratio=self.mlp_ratio, qkv_bias=self.qkv_bias, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, drop_path_rate=self.drop_path_rate, hidden_act=self.hidden_act, use_absolute_embeddings=self.use_absolute_embeddings, path_norm=self.patch_norm, layer_norm_eps=self.layer_norm_eps, initializer_range=self.initializer_range, encoder_stride=self.encoder_stride, out_features=self.out_features, out_indices=self.out_indices, )
def snake_case__ ( self, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase ):
'''simple docstring'''
lowercase__ = MaskFormerSwinModel(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
lowercase__ = model(_UpperCAmelCase )
lowercase__ = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1))
lowercase__ = int(config.embed_dim * 2 ** (len(config.depths ) - 1) )
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, expected_seq_len, expected_dim) )
def snake_case__ ( self, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase ):
'''simple docstring'''
lowercase__ = MaskFormerSwinBackbone(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
lowercase__ = model(_UpperCAmelCase )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ), len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ), [13, 16, 16, 16] )
# verify channels
self.parent.assertEqual(len(model.channels ), len(config.out_features ) )
self.parent.assertListEqual(model.channels, [16, 32, 64] )
# verify ValueError
with self.parent.assertRaises(_UpperCAmelCase ):
lowercase__ = ["stem"]
lowercase__ = MaskFormerSwinBackbone(config=_UpperCAmelCase )
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = self.prepare_config_and_inputs()
lowercase__ , lowercase__ , lowercase__ = config_and_inputs
lowercase__ = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class a__ ( _a , _a , unittest.TestCase ):
snake_case_ = (
(
MaskFormerSwinModel,
MaskFormerSwinBackbone,
)
if is_torch_available()
else ()
)
snake_case_ = {"feature-extraction": MaskFormerSwinModel} if is_torch_available() else {}
snake_case_ = False
snake_case_ = False
snake_case_ = False
snake_case_ = False
snake_case_ = False
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = MaskFormerSwinModelTester(self )
lowercase__ = ConfigTester(self, config_class=_UpperCAmelCase, embed_dim=37 )
@require_torch_multi_gpu
@unittest.skip(
reason=(
"`MaskFormerSwinModel` outputs `hidden_states_spatial_dimensions` which doesn't work well with"
" `nn.DataParallel`"
) )
def snake_case__ ( self ):
'''simple docstring'''
pass
def snake_case__ ( self ):
'''simple docstring'''
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def snake_case__ ( self ):
'''simple docstring'''
return
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_UpperCAmelCase )
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*_UpperCAmelCase )
@unittest.skip("Swin does not use inputs_embeds" )
def snake_case__ ( self ):
'''simple docstring'''
pass
@unittest.skip("Swin does not support feedforward chunking" )
def snake_case__ ( self ):
'''simple docstring'''
pass
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ , lowercase__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase__ = model_class(_UpperCAmelCase )
self.assertIsInstance(model.get_input_embeddings(), (nn.Module) )
lowercase__ = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_UpperCAmelCase, nn.Linear ) )
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ , lowercase__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase__ = model_class(_UpperCAmelCase )
lowercase__ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowercase__ = [*signature.parameters.keys()]
lowercase__ = ["pixel_values"]
self.assertListEqual(arg_names[:1], _UpperCAmelCase )
@unittest.skip(reason="MaskFormerSwin is only used as backbone and doesn't support output_attentions" )
def snake_case__ ( self ):
'''simple docstring'''
pass
@unittest.skip(reason="MaskFormerSwin is only used as an internal backbone" )
def snake_case__ ( self ):
'''simple docstring'''
pass
def snake_case__ ( self, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase ):
'''simple docstring'''
lowercase__ = model_class(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
with torch.no_grad():
lowercase__ = model(**self._prepare_for_class(_UpperCAmelCase, _UpperCAmelCase ) )
lowercase__ = outputs.hidden_states
lowercase__ = getattr(
self.model_tester, "expected_num_hidden_layers", len(self.model_tester.depths ) + 1 )
self.assertEqual(len(_UpperCAmelCase ), _UpperCAmelCase )
# Swin has a different seq_length
lowercase__ = (
config.patch_size
if isinstance(config.patch_size, collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
lowercase__ = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.assertListEqual(
list(hidden_states[0].shape[-2:] ), [num_patches, self.model_tester.embed_dim], )
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ , lowercase__ = self.model_tester.prepare_config_and_inputs_for_common()
lowercase__ = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size, collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
for model_class in self.all_model_classes:
lowercase__ = True
self.check_hidden_states_output(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowercase__ = True
self.check_hidden_states_output(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase )
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ , lowercase__ = self.model_tester.prepare_config_and_inputs_for_common()
lowercase__ = 3
lowercase__ = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size, collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
lowercase__ = (
config.patch_size
if isinstance(config.patch_size, collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
lowercase__ = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
lowercase__ = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
for model_class in self.all_model_classes:
lowercase__ = True
self.check_hidden_states_output(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, (padded_height, padded_width) )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowercase__ = True
self.check_hidden_states_output(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, (padded_height, padded_width) )
@unittest.skip(reason="MaskFormerSwin doesn't have pretrained checkpoints" )
def snake_case__ ( self ):
'''simple docstring'''
pass
@unittest.skip(reason="This will be fixed once MaskFormerSwin is replaced by native Swin" )
def snake_case__ ( self ):
'''simple docstring'''
pass
@unittest.skip(reason="This will be fixed once MaskFormerSwin is replaced by native Swin" )
def snake_case__ ( self ):
'''simple docstring'''
pass
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ , lowercase__ = self.model_tester.prepare_config_and_inputs_for_common()
def set_nan_tensor_to_zero(_UpperCAmelCase ):
lowercase__ = 0
return t
def check_equivalence(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase={} ):
with torch.no_grad():
lowercase__ = model(**_UpperCAmelCase, return_dict=_UpperCAmelCase, **_UpperCAmelCase )
lowercase__ = model(**_UpperCAmelCase, return_dict=_UpperCAmelCase, **_UpperCAmelCase ).to_tuple()
def recursive_check(_UpperCAmelCase, _UpperCAmelCase ):
if isinstance(_UpperCAmelCase, (List, Tuple) ):
for tuple_iterable_value, dict_iterable_value in zip(_UpperCAmelCase, _UpperCAmelCase ):
recursive_check(_UpperCAmelCase, _UpperCAmelCase )
elif isinstance(_UpperCAmelCase, _UpperCAmelCase ):
for tuple_iterable_value, dict_iterable_value in zip(
tuple_object.values(), dict_object.values() ):
recursive_check(_UpperCAmelCase, _UpperCAmelCase )
elif tuple_object is None:
return
else:
self.assertTrue(
torch.allclose(
set_nan_tensor_to_zero(_UpperCAmelCase ), set_nan_tensor_to_zero(_UpperCAmelCase ), atol=1E-5 ), msg=(
"Tuple and dict output are not equal. Difference:"
F''' {torch.max(torch.abs(tuple_object - dict_object ) )}. Tuple has `nan`:'''
F''' {torch.isnan(_UpperCAmelCase ).any()} and `inf`: {torch.isinf(_UpperCAmelCase ).any()}. Dict has'''
F''' `nan`: {torch.isnan(_UpperCAmelCase ).any()} and `inf`: {torch.isinf(_UpperCAmelCase ).any()}.'''
), )
recursive_check(_UpperCAmelCase, _UpperCAmelCase )
for model_class in self.all_model_classes:
lowercase__ = model_class(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
lowercase__ = self._prepare_for_class(_UpperCAmelCase, _UpperCAmelCase )
lowercase__ = self._prepare_for_class(_UpperCAmelCase, _UpperCAmelCase )
check_equivalence(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase )
lowercase__ = self._prepare_for_class(_UpperCAmelCase, _UpperCAmelCase, return_labels=_UpperCAmelCase )
lowercase__ = self._prepare_for_class(_UpperCAmelCase, _UpperCAmelCase, return_labels=_UpperCAmelCase )
check_equivalence(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase )
lowercase__ = self._prepare_for_class(_UpperCAmelCase, _UpperCAmelCase )
lowercase__ = self._prepare_for_class(_UpperCAmelCase, _UpperCAmelCase )
check_equivalence(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, {"output_hidden_states": True} )
lowercase__ = self._prepare_for_class(_UpperCAmelCase, _UpperCAmelCase, return_labels=_UpperCAmelCase )
lowercase__ = self._prepare_for_class(_UpperCAmelCase, _UpperCAmelCase, return_labels=_UpperCAmelCase )
check_equivalence(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, {"output_hidden_states": True} )
@require_torch
class a__ ( unittest.TestCase , _a ):
snake_case_ = (MaskFormerSwinBackbone,) if is_torch_available() else ()
snake_case_ = MaskFormerSwinConfig
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = MaskFormerSwinModelTester(self )
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ , lowercase__ = self.model_tester.prepare_config_and_inputs_for_common()
lowercase__ = inputs_dict["pixel_values"].shape[0]
for backbone_class in self.all_model_classes:
lowercase__ = backbone_class(_UpperCAmelCase )
backbone.to(_UpperCAmelCase )
backbone.eval()
lowercase__ = backbone(**_UpperCAmelCase )
# Test default outputs and verify feature maps
self.assertIsInstance(outputs.feature_maps, _UpperCAmelCase )
self.assertTrue(len(outputs.feature_maps ) == len(backbone.channels ) )
for feature_map, n_channels in zip(outputs.feature_maps, backbone.channels ):
self.assertEqual(feature_map.shape[:2], (batch_size, n_channels) )
self.assertIsNone(outputs.hidden_states )
self.assertIsNone(outputs.attentions )
# Test output_hidden_states=True
lowercase__ = backbone(**_UpperCAmelCase, output_hidden_states=_UpperCAmelCase )
self.assertIsNotNone(outputs.hidden_states )
self.assertEqual(len(outputs.hidden_states ), len(backbone.stage_names ) )
# We skip the stem layer
for hidden_states, n_channels in zip(outputs.hidden_states[1:], backbone.channels ):
for hidden_state in hidden_states:
# Hidden states are in the format (batch_size, (height * width), n_channels)
lowercase__ , lowercase__ , lowercase__ = hidden_state.shape
self.assertEqual((h_batch_size, h_n_channels), (batch_size, n_channels) )
# Test output_attentions=True
if self.has_attentions:
lowercase__ = backbone(**_UpperCAmelCase, output_attentions=_UpperCAmelCase )
self.assertIsNotNone(outputs.attentions )
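# A minimal sketch of the patch-count arithmetic that the hidden-state checks
# above rely on; the 224x224 image and 4x4 patch sizes are assumed for
# illustration, not taken from the model tester.
image_size, patch_size = (224, 224), (4, 4)
num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
assert num_patches == 3136  # 56 * 56 patches per hidden state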
| 701 |
"""simple docstring"""
import itertools
import math
def is_prime(number):
    """Return True if `number` is prime, using 6k +/- 1 trial division."""
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers and all multiples of 3 are not primes
        return False
    # Every prime greater than 3 has the form 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True

def prime_generator():
    """Yield the primes 2, 3, 5, 7, ... indefinitely."""
    num = 2
    while True:
        if is_prime(num):
            yield num
        num += 1

def solution(nth=10001):
    """Return the nth prime number (Project Euler problem 7)."""
    return next(itertools.islice(prime_generator(), nth - 1, nth))
if __name__ == "__main__":
print(F'{solution() = }')
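# A quick usage check of the helpers above: 13 is the sixth prime and 104743
# is the well-known 10001st prime (the Project Euler #7 answer). The second
# assert takes a few seconds of trial division.
assert solution(6) == 13
assert solution() == 104743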
| 668 | 0 |
"""simple docstring"""
import os
from distutils.util import strtobool
def get_int_from_env(env_keys, default):
    """Return the first non-negative integer found among `env_keys`, else `default`."""
    for e in env_keys:
        val = int(os.environ.get(e, -1))
        if val >= 0:
            return val
    return default

def parse_flag_from_env(key, default=False):
    """Interpret an environment variable as a boolean flag."""
    value = os.environ.get(key, str(default))
    return strtobool(value) == 1  # As its name indicates `strtobool` actually returns an int...

def parse_choice_from_env(key, default="no"):
    """Return the raw string value of an environment variable, else `default`."""
    value = os.environ.get(key, str(default))
    return value
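# A minimal sketch of how the strtobool-based flag parsing above behaves; the
# environment variable names are made up for illustration. Note that distutils
# (and strtobool with it) was removed from the standard library in Python 3.12.
os.environ["MY_DEBUG_FLAG"] = "yes"  # strtobool maps y/yes/t/true/on/1 -> 1
assert parse_flag_from_env("MY_DEBUG_FLAG") is True
assert parse_flag_from_env("UNSET_FLAG", default=False) is False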
| 702 |
"""simple docstring"""
from typing import Optional
from .. import Features, NamedSplit
from ..packaged_modules.text.text import Text
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
class a__ ( _a ):
def __init__( self, _UpperCAmelCase, _UpperCAmelCase = None, _UpperCAmelCase = None, _UpperCAmelCase = None, _UpperCAmelCase = False, _UpperCAmelCase = False, _UpperCAmelCase = None, **_UpperCAmelCase, ):
'''simple docstring'''
super().__init__(
_UpperCAmelCase, split=_UpperCAmelCase, features=_UpperCAmelCase, cache_dir=_UpperCAmelCase, keep_in_memory=_UpperCAmelCase, streaming=_UpperCAmelCase, num_proc=_UpperCAmelCase, **_UpperCAmelCase, )
lowercase__ = path_or_paths if isinstance(_UpperCAmelCase, _UpperCAmelCase ) else {self.split: path_or_paths}
lowercase__ = Text(
cache_dir=_UpperCAmelCase, data_files=_UpperCAmelCase, features=_UpperCAmelCase, **_UpperCAmelCase, )
def snake_case__ ( self ):
'''simple docstring'''
if self.streaming:
lowercase__ = self.builder.as_streaming_dataset(split=self.split )
# Build regular (map-style) dataset
else:
lowercase__ = None
lowercase__ = None
lowercase__ = None
lowercase__ = None
self.builder.download_and_prepare(
download_config=_UpperCAmelCase, download_mode=_UpperCAmelCase, verification_mode=_UpperCAmelCase, base_path=_UpperCAmelCase, num_proc=self.num_proc, )
lowercase__ = self.builder.as_dataset(
split=self.split, verification_mode=_UpperCAmelCase, in_memory=self.keep_in_memory )
return dataset
| 668 | 0 |
"""simple docstring"""
from datetime import datetime
import requests
def download_video(url):
    """Fetch the direct video URL via downloadgram and return the raw video bytes."""
    base_url = "https://downloadgram.net/wp-json/wppress/video-downloader/video?url="
    video_url = requests.get(base_url + url).json()[0]["urls"][0]["src"]
    return requests.get(video_url).content
if __name__ == "__main__":
lowerCAmelCase_: int = input("Enter Video/IGTV url: ").strip()
lowerCAmelCase_: Tuple = F'{datetime.now():%Y-%m-%d_%H:%M:%S}.mp4'
with open(file_name, "wb") as fp:
fp.write(download_video(url))
print(F'Done. Video saved to disk as {file_name}.')
| 703 |
"""simple docstring"""
import argparse
import json
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
lowerCAmelCase_: List[str] = 1_6
lowerCAmelCase_: Optional[Any] = 3_2
def __a ( A , A = 16 , A = "bert-base-cased" ):
'''simple docstring'''
lowercase__ = AutoTokenizer.from_pretrained(A )
lowercase__ = load_dataset("glue" , "mrpc" )
def tokenize_function(A ):
# max_length=None => use the model max length (it's actually the default)
lowercase__ = tokenizer(examples["sentence1"] , examples["sentence2"] , truncation=A , max_length=A )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
lowercase__ = datasets.map(
A , batched=A , remove_columns=["idx", "sentence1", "sentence2"] , load_from_cache_file=A )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
lowercase__ = tokenized_datasets.rename_column("label" , "labels" )
def collate_fn(A ):
# On TPU it's best to pad everything to the same length or training will be very slow.
if accelerator.distributed_type == DistributedType.TPU:
return tokenizer.pad(A , padding="max_length" , max_length=1_28 , return_tensors="pt" )
return tokenizer.pad(A , padding="longest" , return_tensors="pt" )
# Instantiate dataloaders.
lowercase__ = DataLoader(
tokenized_datasets["train"] , shuffle=A , collate_fn=A , batch_size=A )
lowercase__ = DataLoader(
tokenized_datasets["validation"] , shuffle=A , collate_fn=A , batch_size=A )
return train_dataloader, eval_dataloader
def __a ( A , A ):
'''simple docstring'''
lowercase__ = Accelerator()
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
lowercase__ = config["lr"]
lowercase__ = int(config["num_epochs"] )
lowercase__ = int(config["seed"] )
lowercase__ = int(config["batch_size"] )
lowercase__ = args.model_name_or_path
set_seed(A )
lowercase__ , lowercase__ = get_dataloaders(A , A , A )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
lowercase__ = AutoModelForSequenceClassification.from_pretrained(A , return_dict=A )
# Instantiate optimizer
lowercase__ = (
AdamW
if accelerator.state.deepspeed_plugin is None
or "optimizer" not in accelerator.state.deepspeed_plugin.deepspeed_config
else DummyOptim
)
lowercase__ = optimizer_cls(params=model.parameters() , lr=A )
if accelerator.state.deepspeed_plugin is not None:
lowercase__ = accelerator.state.deepspeed_plugin.deepspeed_config[
"gradient_accumulation_steps"
]
else:
lowercase__ = 1
lowercase__ = (len(A ) * num_epochs) // gradient_accumulation_steps
# Instantiate scheduler
if (
accelerator.state.deepspeed_plugin is None
or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
):
lowercase__ = get_linear_schedule_with_warmup(
optimizer=A , num_warmup_steps=0 , num_training_steps=A , )
else:
lowercase__ = DummyScheduler(A , total_num_steps=A , warmup_num_steps=0 )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ = accelerator.prepare(
A , A , A , A , A )
# We need to keep track of how many total steps we have iterated over
lowercase__ = 0
# We also need to keep track of the starting epoch so files are named properly
lowercase__ = 0
# Now we train the model
lowercase__ = evaluate.load("glue" , "mrpc" )
lowercase__ = 0
lowercase__ = {}
for epoch in range(A , A ):
model.train()
for step, batch in enumerate(A ):
lowercase__ = model(**A )
lowercase__ = outputs.loss
lowercase__ = loss / gradient_accumulation_steps
accelerator.backward(A )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
overall_step += 1
model.eval()
lowercase__ = 0
for step, batch in enumerate(A ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
lowercase__ = model(**A )
lowercase__ = outputs.logits.argmax(dim=-1 )
# It is slightly faster to call this once rather than multiple times
lowercase__ , lowercase__ = accelerator.gather(
(predictions, batch["labels"]) ) # If we are in a multiprocess environment, the last batch has duplicates
if accelerator.use_distributed:
if step == len(A ) - 1:
lowercase__ = predictions[: len(eval_dataloader.dataset ) - samples_seen]
lowercase__ = references[: len(eval_dataloader.dataset ) - samples_seen]
else:
samples_seen += references.shape[0]
metric.add_batch(
predictions=A , references=A , )
lowercase__ = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(f'''epoch {epoch}:''' , A )
lowercase__ = eval_metric["accuracy"]
if best_performance < eval_metric["accuracy"]:
lowercase__ = eval_metric["accuracy"]
if args.performance_lower_bound is not None:
assert (
args.performance_lower_bound <= best_performance
), f'''Best performance metric {best_performance} is lower than the lower bound {args.performance_lower_bound}'''
accelerator.wait_for_everyone()
if accelerator.is_main_process:
with open(os.path.join(args.output_dir , "all_results.json" ) , "w" ) as f:
json.dump(A , A )
def __a ( ):
'''simple docstring'''
lowercase__ = argparse.ArgumentParser(description="Simple example of training script tracking peak GPU memory usage." )
parser.add_argument(
"--model_name_or_path" , type=A , default="bert-base-cased" , help="Path to pretrained model or model identifier from huggingface.co/models." , required=A , )
parser.add_argument(
"--output_dir" , type=A , default="." , help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory." , )
parser.add_argument(
"--performance_lower_bound" , type=A , default=A , help="Optional lower bound for the performance metric. If set, the training will throw error when the performance metric drops below this value." , )
parser.add_argument(
"--num_epochs" , type=A , default=3 , help="Number of train epochs." , )
lowercase__ = parser.parse_args()
lowercase__ = {"lr": 2e-5, "num_epochs": args.num_epochs, "seed": 42, "batch_size": 16}
training_function(A , A )
if __name__ == "__main__":
main()
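# A worked check, with assumed example numbers, of the scheduler-step
# arithmetic computed above from the dataloader length, the epoch count and
# the gradient accumulation steps.
num_batches, num_epochs, grad_accum = 100, 3, 4
max_training_steps = (num_batches * num_epochs) // grad_accum
assert max_training_steps == 75  # one optimizer/scheduler step per 4 batches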
| 668 | 0 |
"""simple docstring"""
import argparse
import torch
from transformers import RemBertConfig, RemBertModel, load_tf_weights_in_rembert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_rembert_tf_checkpoint_to_pytorch(tf_checkpoint_path, rembert_config_file, pytorch_dump_path):
    """Convert a TensorFlow RemBERT checkpoint to a PyTorch state dict."""
    config = RemBertConfig.from_json_file(rembert_config_file)
    print("Building PyTorch model from configuration: {}".format(str(config)))
    model = RemBertModel(config)
    # Load weights from tf checkpoint
    load_tf_weights_in_rembert(model, config, tf_checkpoint_path)
    # Save pytorch-model
    print("Save PyTorch model to {}".format(pytorch_dump_path))
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
lowerCAmelCase_: int = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--rembert_config_file",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained RemBERT model. \n"
"This specifies the model architecture."
),
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
lowerCAmelCase_: str = parser.parse_args()
convert_rembert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.rembert_config_file, args.pytorch_dump_path)
| 704 |
"""simple docstring"""
import tempfile
import torch
from diffusers import IPNDMScheduler
from .test_schedulers import SchedulerCommonTest
class a__ ( _a ):
snake_case_ = (IPNDMScheduler,)
snake_case_ = (("num_inference_steps", 50),)
def snake_case__ ( self, **_UpperCAmelCase ):
'''simple docstring'''
lowercase__ = {"num_train_timesteps": 1000}
config.update(**_UpperCAmelCase )
return config
def snake_case__ ( self, _UpperCAmelCase=0, **_UpperCAmelCase ):
'''simple docstring'''
lowercase__ = dict(self.forward_default_kwargs )
lowercase__ = kwargs.pop("num_inference_steps", _UpperCAmelCase )
lowercase__ = self.dummy_sample
lowercase__ = 0.1 * sample
lowercase__ = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
lowercase__ = self.get_scheduler_config(**_UpperCAmelCase )
lowercase__ = scheduler_class(**_UpperCAmelCase )
scheduler.set_timesteps(_UpperCAmelCase )
# copy over dummy past residuals
lowercase__ = dummy_past_residuals[:]
if time_step is None:
lowercase__ = scheduler.timesteps[len(scheduler.timesteps ) // 2]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(_UpperCAmelCase )
lowercase__ = scheduler_class.from_pretrained(_UpperCAmelCase )
new_scheduler.set_timesteps(_UpperCAmelCase )
# copy over dummy past residuals
lowercase__ = dummy_past_residuals[:]
lowercase__ = scheduler.step(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, **_UpperCAmelCase ).prev_sample
lowercase__ = new_scheduler.step(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, **_UpperCAmelCase ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
lowercase__ = scheduler.step(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, **_UpperCAmelCase ).prev_sample
lowercase__ = new_scheduler.step(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, **_UpperCAmelCase ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def snake_case__ ( self ):
'''simple docstring'''
pass
def snake_case__ ( self, _UpperCAmelCase=0, **_UpperCAmelCase ):
'''simple docstring'''
lowercase__ = dict(self.forward_default_kwargs )
lowercase__ = kwargs.pop("num_inference_steps", _UpperCAmelCase )
lowercase__ = self.dummy_sample
lowercase__ = 0.1 * sample
lowercase__ = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
lowercase__ = self.get_scheduler_config()
lowercase__ = scheduler_class(**_UpperCAmelCase )
scheduler.set_timesteps(_UpperCAmelCase )
# copy over dummy past residuals (must be after setting timesteps)
lowercase__ = dummy_past_residuals[:]
if time_step is None:
lowercase__ = scheduler.timesteps[len(scheduler.timesteps ) // 2]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(_UpperCAmelCase )
lowercase__ = scheduler_class.from_pretrained(_UpperCAmelCase )
# copy over dummy past residuals
new_scheduler.set_timesteps(_UpperCAmelCase )
# copy over dummy past residuals (must be after setting timesteps)
lowercase__ = dummy_past_residuals[:]
lowercase__ = scheduler.step(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, **_UpperCAmelCase ).prev_sample
lowercase__ = new_scheduler.step(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, **_UpperCAmelCase ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
lowercase__ = scheduler.step(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, **_UpperCAmelCase ).prev_sample
lowercase__ = new_scheduler.step(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, **_UpperCAmelCase ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def snake_case__ ( self, **_UpperCAmelCase ):
'''simple docstring'''
lowercase__ = self.scheduler_classes[0]
lowercase__ = self.get_scheduler_config(**_UpperCAmelCase )
lowercase__ = scheduler_class(**_UpperCAmelCase )
lowercase__ = 10
lowercase__ = self.dummy_model()
lowercase__ = self.dummy_sample_deter
scheduler.set_timesteps(_UpperCAmelCase )
for i, t in enumerate(scheduler.timesteps ):
lowercase__ = model(_UpperCAmelCase, _UpperCAmelCase )
lowercase__ = scheduler.step(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase ).prev_sample
for i, t in enumerate(scheduler.timesteps ):
lowercase__ = model(_UpperCAmelCase, _UpperCAmelCase )
lowercase__ = scheduler.step(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase ).prev_sample
return sample
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = dict(self.forward_default_kwargs )
lowercase__ = kwargs.pop("num_inference_steps", _UpperCAmelCase )
for scheduler_class in self.scheduler_classes:
lowercase__ = self.get_scheduler_config()
lowercase__ = scheduler_class(**_UpperCAmelCase )
lowercase__ = self.dummy_sample
lowercase__ = 0.1 * sample
if num_inference_steps is not None and hasattr(_UpperCAmelCase, "set_timesteps" ):
scheduler.set_timesteps(_UpperCAmelCase )
elif num_inference_steps is not None and not hasattr(_UpperCAmelCase, "set_timesteps" ):
lowercase__ = num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
lowercase__ = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
lowercase__ = dummy_past_residuals[:]
lowercase__ = scheduler.timesteps[5]
lowercase__ = scheduler.timesteps[6]
lowercase__ = scheduler.step(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, **_UpperCAmelCase ).prev_sample
lowercase__ = scheduler.step(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, **_UpperCAmelCase ).prev_sample
self.assertEqual(output_a.shape, sample.shape )
self.assertEqual(output_a.shape, output_a.shape )
lowercase__ = scheduler.step(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, **_UpperCAmelCase ).prev_sample
lowercase__ = scheduler.step(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, **_UpperCAmelCase ).prev_sample
self.assertEqual(output_a.shape, sample.shape )
self.assertEqual(output_a.shape, output_a.shape )
def snake_case__ ( self ):
'''simple docstring'''
for timesteps in [100, 1000]:
self.check_over_configs(num_train_timesteps=_UpperCAmelCase, time_step=_UpperCAmelCase )
def snake_case__ ( self ):
'''simple docstring'''
for t, num_inference_steps in zip([1, 5, 10], [10, 50, 100] ):
self.check_over_forward(num_inference_steps=_UpperCAmelCase, time_step=_UpperCAmelCase )
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = self.full_loop()
lowercase__ = torch.mean(torch.abs(_UpperCAmelCase ) )
assert abs(result_mean.item() - 2_540_529 ) < 10
| 668 | 0 |
"""simple docstring"""
from typing import Any
import numpy as np
def is_hermitian(matrix):
    """Return True if `matrix` equals its own conjugate transpose."""
    return np.array_equal(matrix, matrix.conjugate().T)

def rayleigh_quotient(a, v):
    """Return the Rayleigh quotient (v* a v) / (v* v) of a Hermitian matrix `a`."""
    v_star = v.conjugate().T
    v_star_dot = v_star.dot(a)
    assert isinstance(v_star_dot, np.ndarray)
    return (v_star_dot.dot(v)) / (v_star.dot(v))

def tests():
    a = np.array([[2, 2 + 1j, 4], [2 - 1j, 3, 1j], [4, -1j, 1]])
    v = np.array([[1], [2], [3]])
    assert is_hermitian(a), f"{a} is not hermitian."
    print(rayleigh_quotient(a, v))
    a = np.array([[1, 2, 4], [2, 3, -1], [4, -1, 1]])
    assert is_hermitian(a), f"{a} is not hermitian."
    assert rayleigh_quotient(a, v) == float(3)
if __name__ == "__main__":
import doctest
doctest.testmod()
tests()
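# A worked instance of the quotient above: for the real symmetric (hence
# Hermitian) matrix from tests() and v = (1, 2, 3)^T, M @ v = (17, 5, 5)^T,
# so v* M v = 42 and v* v = 14, giving R(M, v) = 42 / 14 = 3 as asserted.
M = np.array([[1, 2, 4], [2, 3, -1], [4, -1, 1]])
v = np.array([[1], [2], [3]])
assert (v.T @ M @ v).item() / (v.T @ v).item() == 3.0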
| 705 |
"""simple docstring"""
import json
import os
import unittest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class a__ ( _a , unittest.TestCase ):
snake_case_ = MgpstrTokenizer
snake_case_ = False
snake_case_ = {}
snake_case_ = False
def snake_case__ ( self ):
'''simple docstring'''
super().setUp()
# fmt: off
lowercase__ = ["[GO]", "[s]", "0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z"]
# fmt: on
lowercase__ = dict(zip(_UpperCAmelCase, range(len(_UpperCAmelCase ) ) ) )
lowercase__ = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"] )
with open(self.vocab_file, "w", encoding="utf-8" ) as fp:
fp.write(json.dumps(_UpperCAmelCase ) + "\n" )
def snake_case__ ( self, **_UpperCAmelCase ):
'''simple docstring'''
return MgpstrTokenizer.from_pretrained(self.tmpdirname, **_UpperCAmelCase )
def snake_case__ ( self, _UpperCAmelCase ):
'''simple docstring'''
lowercase__ = "tester"
lowercase__ = "tester"
return input_text, output_text
@unittest.skip("MGP-STR always lower cases letters." )
def snake_case__ ( self ):
'''simple docstring'''
pass
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = self.get_tokenizers(do_lower_case=_UpperCAmelCase )
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
lowercase__ = "[SPECIAL_TOKEN]"
tokenizer.add_special_tokens({"cls_token": special_token} )
lowercase__ = tokenizer.encode([special_token], add_special_tokens=_UpperCAmelCase )
self.assertEqual(len(_UpperCAmelCase ), 1 )
lowercase__ = tokenizer.decode(_UpperCAmelCase, skip_special_tokens=_UpperCAmelCase )
self.assertTrue(special_token not in decoded )
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
lowercase__ , lowercase__ = self.get_input_output_texts(_UpperCAmelCase )
lowercase__ = tokenizer.tokenize(_UpperCAmelCase )
lowercase__ = tokenizer.convert_tokens_to_ids(_UpperCAmelCase )
lowercase__ = tokenizer.encode(_UpperCAmelCase, add_special_tokens=_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase, _UpperCAmelCase )
lowercase__ = tokenizer.convert_ids_to_tokens(_UpperCAmelCase )
self.assertNotEqual(len(_UpperCAmelCase ), 0 )
lowercase__ = tokenizer.decode(_UpperCAmelCase )
self.assertIsInstance(_UpperCAmelCase, _UpperCAmelCase )
self.assertEqual(text_a.replace(" ", "" ), _UpperCAmelCase )
@unittest.skip("MGP-STR tokenizer only handles one sequence." )
def snake_case__ ( self ):
'''simple docstring'''
pass
@unittest.skip("inputs cannot be pretokenized in MgpstrTokenizer" )
def snake_case__ ( self ):
'''simple docstring'''
pass
| 668 | 0 |
"""simple docstring"""
import unittest
import numpy as np
from transformers import AlbertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.albert.modeling_flax_albert import (
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForPreTraining,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
FlaxAlbertModel,
)
class a__ ( unittest.TestCase ):
def __init__( self, _UpperCAmelCase, _UpperCAmelCase=13, _UpperCAmelCase=7, _UpperCAmelCase=True, _UpperCAmelCase=True, _UpperCAmelCase=True, _UpperCAmelCase=True, _UpperCAmelCase=99, _UpperCAmelCase=32, _UpperCAmelCase=5, _UpperCAmelCase=4, _UpperCAmelCase=37, _UpperCAmelCase="gelu", _UpperCAmelCase=0.1, _UpperCAmelCase=0.1, _UpperCAmelCase=512, _UpperCAmelCase=16, _UpperCAmelCase=2, _UpperCAmelCase=0.02, _UpperCAmelCase=4, ):
'''simple docstring'''
lowercase__ = parent
lowercase__ = batch_size
lowercase__ = seq_length
lowercase__ = is_training
lowercase__ = use_attention_mask
lowercase__ = use_token_type_ids
lowercase__ = use_labels
lowercase__ = vocab_size
lowercase__ = hidden_size
lowercase__ = num_hidden_layers
lowercase__ = num_attention_heads
lowercase__ = intermediate_size
lowercase__ = hidden_act
lowercase__ = hidden_dropout_prob
lowercase__ = attention_probs_dropout_prob
lowercase__ = max_position_embeddings
lowercase__ = type_vocab_size
lowercase__ = type_sequence_label_size
lowercase__ = initializer_range
lowercase__ = num_choices
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = ids_tensor([self.batch_size, self.seq_length], self.vocab_size )
lowercase__ = None
if self.use_attention_mask:
lowercase__ = random_attention_mask([self.batch_size, self.seq_length] )
lowercase__ = None
if self.use_token_type_ids:
lowercase__ = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size )
lowercase__ = AlbertConfig(
vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=_UpperCAmelCase, initializer_range=self.initializer_range, )
return config, input_ids, token_type_ids, attention_mask
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = self.prepare_config_and_inputs()
lowercase__ , lowercase__ , lowercase__ , lowercase__ = config_and_inputs
lowercase__ = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
return config, inputs_dict
@require_flax
class a__ ( _a , unittest.TestCase ):
snake_case_ = (
(
FlaxAlbertModel,
FlaxAlbertForPreTraining,
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
FlaxAlbertForQuestionAnswering,
)
if is_flax_available()
else ()
)
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = FlaxAlbertModelTester(self )
@slow
def snake_case__ ( self ):
'''simple docstring'''
for model_class_name in self.all_model_classes:
lowercase__ = model_class_name.from_pretrained("albert-base-v2" )
lowercase__ = model(np.ones((1, 1) ) )
self.assertIsNotNone(_UpperCAmelCase )
@require_flax
class a__ ( unittest.TestCase ):
@slow
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = FlaxAlbertModel.from_pretrained("albert-base-v2" )
lowercase__ = np.array([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]] )
lowercase__ = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
lowercase__ = model(_UpperCAmelCase, attention_mask=_UpperCAmelCase )[0]
lowercase__ = (1, 11, 768)
self.assertEqual(output.shape, _UpperCAmelCase )
lowercase__ = np.array(
[[[-0.6_513, 1.5_035, -0.2_766], [-0.6_515, 1.5_046, -0.2_780], [-0.6_512, 1.5_049, -0.2_784]]] )
self.assertTrue(jnp.allclose(output[:, 1:4, 1:4], _UpperCAmelCase, atol=1E-4 ) )
| 706 |
"""simple docstring"""
from ...utils import is_note_seq_available, is_transformers_available, is_torch_available
from ...utils import OptionalDependencyNotAvailable
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .notes_encoder import SpectrogramNotesEncoder
from .continous_encoder import SpectrogramContEncoder
from .pipeline_spectrogram_diffusion import (
SpectrogramContEncoder,
SpectrogramDiffusionPipeline,
TaFilmDecoder,
)
try:
if not (is_transformers_available() and is_torch_available() and is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_transformers_and_torch_and_note_seq_objects import * # noqa F403
else:
from .midi_utils import MidiProcessor
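# A minimal, self-contained sketch of the optional-dependency guard pattern
# used above, with importlib.util.find_spec standing in for the
# is_xxx_available() helpers.
import importlib.util

def backend_available(name):
    # True when the package can be located without actually importing it
    return importlib.util.find_spec(name) is not None

if backend_available("math"):  # stdlib module, always importable
    import math
    assert math.sqrt(9) == 3.0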
| 668 | 0 |
"""simple docstring"""
import logging
import os
import sys
from dataclasses import dataclass, field
from importlib import import_module
from typing import Dict, List, Optional, Tuple
import numpy as np
from seqeval.metrics import accuracy_score, fa_score, precision_score, recall_score
from torch import nn
from utils_ner import Split, TokenClassificationDataset, TokenClassificationTask
import transformers
from transformers import (
AutoConfig,
AutoModelForTokenClassification,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
lowerCAmelCase_: Tuple = logging.getLogger(__name__)
@dataclass
class a__ :
snake_case_ = field(
metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} )
snake_case_ = field(
default=_a , metadata={"help": "Pretrained config name or path if not the same as model_name"} )
snake_case_ = field(
default="NER" , metadata={"help": "Task type to fine tune in training (e.g. NER, POS, etc)"} )
snake_case_ = field(
default=_a , metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} )
snake_case_ = field(default=_a , metadata={"help": "Set this flag to use fast tokenization."} )
# If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
# or just modify its tokenizer_config.json.
snake_case_ = field(
default=_a , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} , )
@dataclass
class a__ :
snake_case_ = field(
metadata={"help": "The input data dir. Should contain the .txt files for a CoNLL-2003-formatted task."} )
snake_case_ = field(
default=_a , metadata={"help": "Path to a file containing all labels. If not specified, CoNLL-2003 labels are used."} , )
snake_case_ = field(
default=128 , metadata={
"help": (
"The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
)
} , )
snake_case_ = field(
default=_a , metadata={"help": "Overwrite the cached training and evaluation sets"} )
def __a ( ):
'''simple docstring'''
lowercase__ = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith(".json" ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
lowercase__ , lowercase__ , lowercase__ = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
lowercase__ , lowercase__ , lowercase__ = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
f'''Output directory ({training_args.output_dir}) already exists and is not empty. Use'''
" --overwrite_output_dir to overcome." )
lowercase__ = import_module("tasks" )
try:
lowercase__ = getattr(A , model_args.task_type )
lowercase__ = token_classification_task_clazz()
except AttributeError:
raise ValueError(
f'''Task {model_args.task_type} needs to be defined as a TokenClassificationTask subclass in {module}. '''
f'''Available tasks classes are: {TokenClassificationTask.__subclasses__()}''' )
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
logger.warning(
"Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s" , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fpaa , )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info("Training/evaluation parameters %s" , A )
# Set seed
set_seed(training_args.seed )
# Prepare CONLL-2003 task
lowercase__ = token_classification_task.get_labels(data_args.labels )
lowercase__ = dict(enumerate(A ) )
lowercase__ = len(A )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
lowercase__ = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=A , idalabel=A , labelaid={label: i for i, label in enumerate(A )} , cache_dir=model_args.cache_dir , )
lowercase__ = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast , )
lowercase__ = AutoModelForTokenClassification.from_pretrained(
model_args.model_name_or_path , from_tf=bool(".ckpt" in model_args.model_name_or_path ) , config=A , cache_dir=model_args.cache_dir , )
# Get datasets
lowercase__ = (
TokenClassificationDataset(
token_classification_task=A , data_dir=data_args.data_dir , tokenizer=A , labels=A , model_type=config.model_type , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.train , )
if training_args.do_train
else None
)
lowercase__ = (
TokenClassificationDataset(
token_classification_task=A , data_dir=data_args.data_dir , tokenizer=A , labels=A , model_type=config.model_type , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.dev , )
if training_args.do_eval
else None
)
def align_predictions(A , A ) -> Tuple[List[int], List[int]]:
lowercase__ = np.argmax(A , axis=2 )
lowercase__ , lowercase__ = preds.shape
lowercase__ = [[] for _ in range(A )]
lowercase__ = [[] for _ in range(A )]
for i in range(A ):
for j in range(A ):
if label_ids[i, j] != nn.CrossEntropyLoss().ignore_index:
out_label_list[i].append(label_map[label_ids[i][j]] )
preds_list[i].append(label_map[preds[i][j]] )
return preds_list, out_label_list
def compute_metrics(A ) -> Dict:
lowercase__ , lowercase__ = align_predictions(p.predictions , p.label_ids )
return {
"accuracy_score": accuracy_score(A , A ),
"precision": precision_score(A , A ),
"recall": recall_score(A , A ),
"f1": fa_score(A , A ),
}
# Data collator
lowercase__ = DataCollatorWithPadding(A , pad_to_multiple_of=8 ) if training_args.fpaa else None
# Initialize our Trainer
lowercase__ = Trainer(
model=A , args=A , train_dataset=A , eval_dataset=A , compute_metrics=A , data_collator=A , )
# Training
if training_args.do_train:
trainer.train(
model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_process_zero():
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
lowercase__ = {}
if training_args.do_eval:
logger.info("*** Evaluate ***" )
lowercase__ = trainer.evaluate()
lowercase__ = os.path.join(training_args.output_dir , "eval_results.txt" )
if trainer.is_world_process_zero():
with open(A , "w" ) as writer:
logger.info("***** Eval results *****" )
for key, value in result.items():
logger.info(" %s = %s" , A , A )
writer.write("%s = %s\n" % (key, value) )
results.update(A )
# Predict
if training_args.do_predict:
lowercase__ = TokenClassificationDataset(
token_classification_task=A , data_dir=data_args.data_dir , tokenizer=A , labels=A , model_type=config.model_type , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.test , )
lowercase__ , lowercase__ , lowercase__ = trainer.predict(A )
lowercase__ , lowercase__ = align_predictions(A , A )
lowercase__ = os.path.join(training_args.output_dir , "test_results.txt" )
if trainer.is_world_process_zero():
with open(A , "w" ) as writer:
for key, value in metrics.items():
logger.info(" %s = %s" , A , A )
writer.write("%s = %s\n" % (key, value) )
# Save predictions
lowercase__ = os.path.join(training_args.output_dir , "test_predictions.txt" )
if trainer.is_world_process_zero():
with open(A , "w" ) as writer:
with open(os.path.join(data_args.data_dir , "test.txt" ) , "r" ) as f:
token_classification_task.write_predictions_to_file(A , A , A )
return results
def __a ( A ):
'''simple docstring'''
main()
if __name__ == "__main__":
main()
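# A tiny worked example, with made-up shapes and labels, of the
# align_predictions logic above: argmax over the label axis, then drop
# positions whose gold id equals nn.CrossEntropyLoss().ignore_index (-100).
import numpy as np
label_map = {0: "O", 1: "B-LOC"}
predictions = np.array([[[2.0, 0.1], [0.2, 1.5], [0.3, 0.1]]])  # (1, 3, 2) logits
label_ids = np.array([[0, 1, -100]])  # the last position is padding
preds = np.argmax(predictions, axis=2)
preds_list = [
    [label_map[int(p)] for p, y in zip(row_p, row_y) if y != -100]
    for row_p, row_y in zip(preds, label_ids)
]
assert preds_list == [["O", "B-LOC"]]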
| 707 |
"""simple docstring"""
from typing import Any
import numpy as np
def is_hermitian(matrix):
    """Return True if `matrix` equals its own conjugate transpose."""
    return np.array_equal(matrix, matrix.conjugate().T)

def rayleigh_quotient(a, v):
    """Return the Rayleigh quotient (v* a v) / (v* v) of a Hermitian matrix `a`."""
    v_star = v.conjugate().T
    v_star_dot = v_star.dot(a)
    assert isinstance(v_star_dot, np.ndarray)
    return (v_star_dot.dot(v)) / (v_star.dot(v))

def tests():
    a = np.array([[2, 2 + 1j, 4], [2 - 1j, 3, 1j], [4, -1j, 1]])
    v = np.array([[1], [2], [3]])
    assert is_hermitian(a), f"{a} is not hermitian."
    print(rayleigh_quotient(a, v))
    a = np.array([[1, 2, 4], [2, 3, -1], [4, -1, 1]])
    assert is_hermitian(a), f"{a} is not hermitian."
    assert rayleigh_quotient(a, v) == float(3)
if __name__ == "__main__":
import doctest
doctest.testmod()
tests()
| 668 | 0 |
from sympy import diff, lambdify, symbols
from sympy.functions import * # noqa: F403
def __a ( A , A , A = "x" , A = 10**-10 , A = 1 , ):
'''simple docstring'''
lowercase__ = symbols(A )
lowercase__ = lambdify(A , A )
lowercase__ = lambdify(A , diff(A , A ) )
lowercase__ = starting_point
while True:
if diff_function(A ) != 0:
lowercase__ = prev_guess - multiplicity * func(A ) / diff_function(
A )
else:
raise ZeroDivisionError("Could not find root" ) from None
# Precision is checked by comparing the difference of consecutive guesses
if abs(next_guess - prev_guess ) < precision:
return next_guess
lowercase__ = next_guess
# Let's Execute
if __name__ == "__main__":
# Find root of trigonometric function
# Find value of pi
print(F'The root of sin(x) = 0 is {newton_raphson("sin(x)", 2)}')
# Find root of polynomial
# Find fourth Root of 5
print(F'The root of x**4 - 5 = 0 is {newton_raphson("x**4 -5", 0.4 +5j)}')
# Find value of e
print(
"The root of log(y) - 1 = 0 is ",
F'{newton_raphson("log(y) - 1", 2, variable="y")}',
)
# Exponential Roots
print(
"The root of exp(x) - 1 = 0 is",
F'{newton_raphson("exp(x) - 1", 1_0, precision=0.005)}',
)
# Find root of cos(x)
print(F'The root of cos(x) = 0 is {newton_raphson("cos(x)", 0)}')
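# A quick convergence sanity check of the same Newton-Raphson update written
# out by hand for f(x) = x**2 - 4, whose positive root is 2.
x = 3.0
for _ in range(20):
    x = x - (x * x - 4) / (2 * x)
assert abs(x - 2.0) < 1E-10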
| 708 |
"""simple docstring"""
import gc
import inspect
import unittest
import torch
from parameterized import parameterized
from diffusers import PriorTransformer
from diffusers.utils import floats_tensor, slow, torch_all_close, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin
enable_full_determinism()
class a__ ( _a , unittest.TestCase ):
snake_case_ = PriorTransformer
snake_case_ = "hidden_states"
@property
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = 4
lowercase__ = 8
lowercase__ = 7
lowercase__ = floats_tensor((batch_size, embedding_dim) ).to(_UpperCAmelCase )
lowercase__ = floats_tensor((batch_size, embedding_dim) ).to(_UpperCAmelCase )
lowercase__ = floats_tensor((batch_size, num_embeddings, embedding_dim) ).to(_UpperCAmelCase )
return {
"hidden_states": hidden_states,
"timestep": 2,
"proj_embedding": proj_embedding,
"encoder_hidden_states": encoder_hidden_states,
}
def snake_case__ ( self, _UpperCAmelCase=0 ):
'''simple docstring'''
torch.manual_seed(_UpperCAmelCase )
lowercase__ = 4
lowercase__ = 8
lowercase__ = 7
lowercase__ = torch.randn((batch_size, embedding_dim) ).to(_UpperCAmelCase )
lowercase__ = torch.randn((batch_size, embedding_dim) ).to(_UpperCAmelCase )
lowercase__ = torch.randn((batch_size, num_embeddings, embedding_dim) ).to(_UpperCAmelCase )
return {
"hidden_states": hidden_states,
"timestep": 2,
"proj_embedding": proj_embedding,
"encoder_hidden_states": encoder_hidden_states,
}
@property
def snake_case__ ( self ):
'''simple docstring'''
return (4, 8)
@property
def snake_case__ ( self ):
'''simple docstring'''
return (4, 8)
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = {
"num_attention_heads": 2,
"attention_head_dim": 4,
"num_layers": 2,
"embedding_dim": 8,
"num_embeddings": 7,
"additional_embeddings": 4,
}
lowercase__ = self.dummy_input
return init_dict, inputs_dict
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ , lowercase__ = PriorTransformer.from_pretrained(
"hf-internal-testing/prior-dummy", output_loading_info=_UpperCAmelCase )
self.assertIsNotNone(_UpperCAmelCase )
self.assertEqual(len(loading_info["missing_keys"] ), 0 )
model.to(_UpperCAmelCase )
lowercase__ = model(**self.dummy_input )[0]
assert hidden_states is not None, "Make sure output is not None"
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ , lowercase__ = self.prepare_init_args_and_inputs_for_common()
lowercase__ = self.model_class(**_UpperCAmelCase )
lowercase__ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowercase__ = [*signature.parameters.keys()]
lowercase__ = ["hidden_states", "timestep"]
self.assertListEqual(arg_names[:2], _UpperCAmelCase )
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = PriorTransformer.from_pretrained("hf-internal-testing/prior-dummy" )
lowercase__ = model.to(_UpperCAmelCase )
if hasattr(_UpperCAmelCase, "set_default_attn_processor" ):
model.set_default_attn_processor()
lowercase__ = self.get_dummy_seed_input()
with torch.no_grad():
lowercase__ = model(**_UpperCAmelCase )[0]
lowercase__ = output[0, :5].flatten().cpu()
print(_UpperCAmelCase )
# Since the VAE Gaussian prior's generator is seeded on the appropriate device,
# the expected output slices are not the same for CPU and GPU.
lowercase__ = torch.tensor([-1.3_436, -0.2_870, 0.7_538, 0.4_368, -0.0_239] )
self.assertTrue(torch_all_close(_UpperCAmelCase, _UpperCAmelCase, rtol=1E-2 ) )
@slow
class a__ ( unittest.TestCase ):
def snake_case__ ( self, _UpperCAmelCase=1, _UpperCAmelCase=768, _UpperCAmelCase=77, _UpperCAmelCase=0 ):
'''simple docstring'''
torch.manual_seed(_UpperCAmelCase )
lowercase__ = batch_size
lowercase__ = embedding_dim
lowercase__ = num_embeddings
lowercase__ = torch.randn((batch_size, embedding_dim) ).to(_UpperCAmelCase )
lowercase__ = torch.randn((batch_size, embedding_dim) ).to(_UpperCAmelCase )
lowercase__ = torch.randn((batch_size, num_embeddings, embedding_dim) ).to(_UpperCAmelCase )
return {
"hidden_states": hidden_states,
"timestep": 2,
"proj_embedding": proj_embedding,
"encoder_hidden_states": encoder_hidden_states,
}
def snake_case__ ( self ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@parameterized.expand(
[
# fmt: off
[13, [-0.5_861, 0.1_283, -0.0_931, 0.0_882, 0.4_476, 0.1_329, -0.0_498, 0.0_640]],
[37, [-0.4_913, 0.0_110, -0.0_483, 0.0_541, 0.4_954, -0.0_170, 0.0_354, 0.1_651]],
# fmt: on
] )
def snake_case__ ( self, _UpperCAmelCase, _UpperCAmelCase ):
'''simple docstring'''
lowercase__ = PriorTransformer.from_pretrained("kandinsky-community/kandinsky-2-1-prior", subfolder="prior" )
model.to(_UpperCAmelCase )
lowercase__ = self.get_dummy_seed_input(seed=_UpperCAmelCase )
with torch.no_grad():
lowercase__ = model(**_UpperCAmelCase )[0]
assert list(sample.shape ) == [1, 768]
lowercase__ = sample[0, :8].flatten().cpu()
print(_UpperCAmelCase )
lowercase__ = torch.tensor(_UpperCAmelCase )
assert torch_all_close(_UpperCAmelCase, _UpperCAmelCase, atol=1E-3 )
| 668 | 0 |
"""simple docstring"""
import argparse
import os
import re
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_dummies.py
lowerCAmelCase_: Optional[Any] = "src/diffusers"
# Matches is_xxx_available()
lowerCAmelCase_: int = re.compile(R"is\_([a-z_]*)_available\(\)")
# Matches from xxx import bla
lowerCAmelCase_: int = re.compile(R"\s+from\s+\S*\s+import\s+([^\(\s].*)\n")
lowerCAmelCase_: Tuple = "\n{0} = None\n"
lowerCAmelCase_: Any = "\nclass {0}(metaclass=DummyObject):\n _backends = {1}\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, {1})\n\n @classmethod\n def from_config(cls, *args, **kwargs):\n requires_backends(cls, {1})\n\n @classmethod\n def from_pretrained(cls, *args, **kwargs):\n requires_backends(cls, {1})\n"
lowerCAmelCase_: Any = "\ndef {0}(*args, **kwargs):\n requires_backends({0}, {1})\n"
def __a ( A ):
'''simple docstring'''
lowercase__ = _re_backend.findall(A )
if len(A ) == 0:
return None
return "_and_".join(A )
def __a ( ):
'''simple docstring'''
with open(os.path.join(A , "__init__.py" ) , "r" , encoding="utf-8" , newline="\n" ) as f:
lowercase__ = f.readlines()
# Get to the point we do the actual imports for type checking
lowercase__ = 0
lowercase__ = {}
# Go through the end of the file
while line_index < len(A ):
# If the line contains is_backend_available, we grab all objects associated with the `else` block
lowercase__ = find_backend(lines[line_index] )
if backend is not None:
while not lines[line_index].startswith("else:" ):
line_index += 1
line_index += 1
lowercase__ = []
# Until we unindent, add backend objects to the list
while line_index < len(A ) and len(lines[line_index] ) > 1:
lowercase__ = lines[line_index]
lowercase__ = _re_single_line_import.search(A )
if single_line_import_search is not None:
objects.extend(single_line_import_search.groups()[0].split(", " ) )
elif line.startswith(" " * 8 ):
objects.append(line[8:-2] )
line_index += 1
if len(A ) > 0:
lowercase__ = objects
else:
line_index += 1
return backend_specific_objects
def __a ( A , A ):
'''simple docstring'''
if name.isupper():
return DUMMY_CONSTANT.format(A )
elif name.islower():
return DUMMY_FUNCTION.format(A , A )
else:
return DUMMY_CLASS.format(A , A )
def __a ( A=None ):
'''simple docstring'''
if backend_specific_objects is None:
lowercase__ = read_init()
# For special correspondence backend to module name as used in the function requires_modulename
lowercase__ = {}
for backend, objects in backend_specific_objects.items():
lowercase__ = "[" + ", ".join(f'''"{b}"''' for b in backend.split("_and_" ) ) + "]"
lowercase__ = "# This file is autogenerated by the command `make fix-copies`, do not edit.\n"
dummy_file += "from ..utils import DummyObject, requires_backends\n\n"
dummy_file += "\n".join([create_dummy_object(A , A ) for o in objects] )
lowercase__ = dummy_file
return dummy_files
def __a ( A=False ):
'''simple docstring'''
lowercase__ = create_dummy_files()
# For special correspondence backend to shortcut as used in utils/dummy_xxx_objects.py
lowercase__ = {"torch": "pt"}
# Locate actual dummy modules and read their content.
lowercase__ = os.path.join(A , "utils" )
lowercase__ = {
backend: os.path.join(A , f'''dummy_{short_names.get(A , A )}_objects.py''' )
for backend in dummy_files.keys()
}
lowercase__ = {}
for backend, file_path in dummy_file_paths.items():
if os.path.isfile(A ):
with open(A , "r" , encoding="utf-8" , newline="\n" ) as f:
lowercase__ = f.read()
else:
lowercase__ = ""
for backend in dummy_files.keys():
if dummy_files[backend] != actual_dummies[backend]:
if overwrite:
print(
f'''Updating diffusers.utils.dummy_{short_names.get(A , A )}_objects.py as the main '''
"__init__ has new objects." )
with open(dummy_file_paths[backend] , "w" , encoding="utf-8" , newline="\n" ) as f:
f.write(dummy_files[backend] )
else:
raise ValueError(
"The main __init__ has objects that are not present in "
f'''diffusers.utils.dummy_{short_names.get(A , A )}_objects.py. Run `make fix-copies` '''
"to fix this." )
if __name__ == "__main__":
lowerCAmelCase_: Optional[Any] = argparse.ArgumentParser()
parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
lowerCAmelCase_: List[str] = parser.parse_args()
check_dummies(args.fix_and_overwrite)
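# A quick check, on a made-up input line, of the backend-detection regex above
# (the find_backend-style helper itself is anonymized as `__a` in this dump).
line = "if not (is_torch_available() and is_transformers_available()):"
assert _re_backend.findall(line) == ["torch", "transformers"]
assert "_and_".join(["torch", "transformers"]) == "torch_and_transformers"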
| 709 |
"""simple docstring"""
lowerCAmelCase_: Any = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/"
def base64_encode(data):
    """Encode a bytes-like object to Base64, mirroring base64.b64encode."""
    if not isinstance(data, bytes):
        msg = f"a bytes-like object is required, not '{data.__class__.__name__}'"
        raise TypeError(msg)
    binary_stream = "".join(bin(byte)[2:].zfill(8) for byte in data)
    padding_needed = len(binary_stream) % 6 != 0
    if padding_needed:
        # The padding that will be added later
        padding = b"=" * ((6 - len(binary_stream) % 6) // 2)
        # Append binary_stream with arbitrary binary digits (0's by default) to make its
        # length a multiple of 6.
        binary_stream += "0" * (6 - len(binary_stream) % 6)
    else:
        padding = b""
    # Encode every 6 binary digits to their corresponding Base64 character
    return (
        "".join(
            B64_CHARSET[int(binary_stream[index : index + 6], 2)]
            for index in range(0, len(binary_stream), 6)
        ).encode()
        + padding
    )

def base64_decode(encoded_data):
    """Decode a Base64 string or bytes-like object, mirroring base64.b64decode."""
    if not isinstance(encoded_data, bytes) and not isinstance(encoded_data, str):
        msg = (
            "argument should be a bytes-like object or ASCII string, "
            f"not '{encoded_data.__class__.__name__}'"
        )
        raise TypeError(msg)
    # In case encoded_data is a bytes-like object, make sure it contains only
    # ASCII characters so we convert it to a string object
    if isinstance(encoded_data, bytes):
        try:
            encoded_data = encoded_data.decode("utf-8")
        except UnicodeDecodeError:
            raise ValueError("base64 encoded data should only contain ASCII characters")
    padding = encoded_data.count("=")
    # Check if the encoded string contains non base64 characters
    if padding:
        assert all(
            char in B64_CHARSET for char in encoded_data[:-padding]
        ), "Invalid base64 character(s) found."
    else:
        assert all(char in B64_CHARSET for char in encoded_data), "Invalid base64 character(s) found."
    # Check the padding
    assert len(encoded_data) % 4 == 0 and padding < 3, "Incorrect padding"
    if padding:
        # Remove padding if there is one
        encoded_data = encoded_data[:-padding]
        binary_stream = "".join(
            bin(B64_CHARSET.index(char))[2:].zfill(6) for char in encoded_data
        )[: -padding * 2]
    else:
        binary_stream = "".join(bin(B64_CHARSET.index(char))[2:].zfill(6) for char in encoded_data)
    data = [
        int(binary_stream[index : index + 8], 2) for index in range(0, len(binary_stream), 8)
    ]
    return bytes(data)
if __name__ == "__main__":
import doctest
doctest.testmod()
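# A round-trip check of the encoder/decoder pair above against the standard
# library: "Hello World!" is 12 bytes, so it encodes to 16 characters with no
# padding.
import base64
data = b"Hello World!"
assert base64_encode(data) == base64.b64encode(data) == b"SGVsbG8gV29ybGQh"
assert base64_decode(base64_encode(data)) == data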
| 668 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
lowerCAmelCase_: List[Any] = {
"configuration_clip": [
"CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
"CLIPConfig",
"CLIPOnnxConfig",
"CLIPTextConfig",
"CLIPVisionConfig",
],
"processing_clip": ["CLIPProcessor"],
"tokenization_clip": ["CLIPTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_: Dict = ["CLIPTokenizerFast"]
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_: Tuple = ["CLIPFeatureExtractor"]
lowerCAmelCase_: Optional[int] = ["CLIPImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_: int = [
"CLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
"CLIPModel",
"CLIPPreTrainedModel",
"CLIPTextModel",
"CLIPTextModelWithProjection",
"CLIPVisionModel",
"CLIPVisionModelWithProjection",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_: Any = [
"TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFCLIPModel",
"TFCLIPPreTrainedModel",
"TFCLIPTextModel",
"TFCLIPVisionModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_: Optional[Any] = [
"FlaxCLIPModel",
"FlaxCLIPPreTrainedModel",
"FlaxCLIPTextModel",
"FlaxCLIPTextPreTrainedModel",
"FlaxCLIPVisionModel",
"FlaxCLIPVisionPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_clip import (
CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
CLIPConfig,
CLIPOnnxConfig,
CLIPTextConfig,
CLIPVisionConfig,
)
from .processing_clip import CLIPProcessor
from .tokenization_clip import CLIPTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_clip_fast import CLIPTokenizerFast
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_clip import CLIPFeatureExtractor
from .image_processing_clip import CLIPImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_clip import (
CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
CLIPModel,
CLIPPreTrainedModel,
CLIPTextModel,
CLIPTextModelWithProjection,
CLIPVisionModel,
CLIPVisionModelWithProjection,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_clip import (
TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFCLIPModel,
TFCLIPPreTrainedModel,
TFCLIPTextModel,
TFCLIPVisionModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_clip import (
FlaxCLIPModel,
FlaxCLIPPreTrainedModel,
FlaxCLIPTextModel,
FlaxCLIPTextPreTrainedModel,
FlaxCLIPVisionModel,
FlaxCLIPVisionPreTrainedModel,
)
else:
import sys
lowerCAmelCase_: Tuple = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
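# _LazyModule defers the heavy framework imports above until an attribute is
# first touched. The same idea in miniature uses PEP 562's module-level
# __getattr__; the stdlib names in the mapping are stand-ins, not part of CLIP:
import importlib

_LAZY_ATTRS = {"sqrt": "math", "Path": "pathlib"}  # attribute -> providing module

def __getattr__(name):
    if name in _LAZY_ATTRS:
        return getattr(importlib.import_module(_LAZY_ATTRS[name]), name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")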
| 710 |
"""simple docstring"""
from sympy import diff, lambdify, symbols
from sympy.functions import * # noqa: F403
def __a ( A , A , A = "x" , A = 10**-10 , A = 1 , ):
'''simple docstring'''
lowercase__ = symbols(A )
lowercase__ = lambdify(A , A )
lowercase__ = lambdify(A , diff(A , A ) )
lowercase__ = starting_point
while True:
if diff_function(A ) != 0:
lowercase__ = prev_guess - multiplicity * func(A ) / diff_function(
A )
else:
raise ZeroDivisionError("Could not find root" ) from None
# Precision is checked by comparing the difference of consecutive guesses
if abs(next_guess - prev_guess ) < precision:
return next_guess
lowercase__ = next_guess
# Let's Execute
if __name__ == "__main__":
# Find root of trigonometric function
# Find value of pi
print(F'The root of sin(x) = 0 is {newton_raphson("sin(x)", 2)}')
# Find root of polynomial
# Find fourth Root of 5
print(F'The root of x**4 - 5 = 0 is {newton_raphson("x**4 -5", 0.4 +5j)}')
# Find value of e
print(
"The root of log(y) - 1 = 0 is ",
F'{newton_raphson("log(y) - 1", 2, variable="y")}',
)
# Exponential Roots
print(
"The root of exp(x) - 1 = 0 is",
F'{newton_raphson("exp(x) - 1", 1_0, precision=0.005)}',
)
# Find root of cos(x)
print(F'The root of cos(x) = 0 is {newton_raphson("cos(x)", 0)}')
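# The same iteration without sympy, for a function whose derivative is known in
# closed form — a minimal sketch finding sqrt(2) as the root of f(x) = x**2 - 2:
def newton_sqrt_of_two(x0: float = 1.0, precision: float = 1e-10) -> float:
    prev_guess = x0
    while True:
        next_guess = prev_guess - (prev_guess * prev_guess - 2) / (2 * prev_guess)  # x - f(x)/f'(x)
        if abs(next_guess - prev_guess) < precision:
            return next_guess
        prev_guess = next_guess

# newton_sqrt_of_two() -> 1.4142135623730951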
| 668 | 0 |
"""simple docstring"""
def __a ( A , A ):
'''simple docstring'''
if digit_amount > 0:
return round(number - int(A ) , A )
return number - int(A )
if __name__ == "__main__":
print(decimal_isolate(1.53, 0))
print(decimal_isolate(35.345, 1))
print(decimal_isolate(35.345, 2))
print(decimal_isolate(35.345, 3))
print(decimal_isolate(-14.789, 3))
print(decimal_isolate(0, 2))
print(decimal_isolate(-14.123, 1))
print(decimal_isolate(-14.123, 2))
print(decimal_isolate(-14.123, 3))
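# math.modf performs the same whole/fraction split in one call, and the
# fractional part carries the sign of the input just like the helper above:
import math

frac, whole = math.modf(-14.789)
# whole -> -14.0, frac -> -0.789 (up to binary floating-point error)
# round(frac, 3) reproduces decimal_isolate(-14.789, 3) -> -0.789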
| 711 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
lowerCAmelCase_: Union[str, Any] = {
"configuration_distilbert": [
"DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP",
"DistilBertConfig",
"DistilBertOnnxConfig",
],
"tokenization_distilbert": ["DistilBertTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_: Union[str, Any] = ["DistilBertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_: Any = [
"DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"DistilBertForMaskedLM",
"DistilBertForMultipleChoice",
"DistilBertForQuestionAnswering",
"DistilBertForSequenceClassification",
"DistilBertForTokenClassification",
"DistilBertModel",
"DistilBertPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_: Tuple = [
"TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFDistilBertForMaskedLM",
"TFDistilBertForMultipleChoice",
"TFDistilBertForQuestionAnswering",
"TFDistilBertForSequenceClassification",
"TFDistilBertForTokenClassification",
"TFDistilBertMainLayer",
"TFDistilBertModel",
"TFDistilBertPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_: Optional[Any] = [
"FlaxDistilBertForMaskedLM",
"FlaxDistilBertForMultipleChoice",
"FlaxDistilBertForQuestionAnswering",
"FlaxDistilBertForSequenceClassification",
"FlaxDistilBertForTokenClassification",
"FlaxDistilBertModel",
"FlaxDistilBertPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_distilbert import (
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
DistilBertConfig,
DistilBertOnnxConfig,
)
from .tokenization_distilbert import DistilBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_distilbert_fast import DistilBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_distilbert import (
DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
DistilBertModel,
DistilBertPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_distilbert import (
TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDistilBertForMaskedLM,
TFDistilBertForMultipleChoice,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertMainLayer,
TFDistilBertModel,
TFDistilBertPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_distilbert import (
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertModel,
FlaxDistilBertPreTrainedModel,
)
else:
import sys
lowerCAmelCase_: Optional[int] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 668 | 0 |
"""simple docstring"""
from __future__ import annotations
import unittest
import numpy as np
from transformers import BlipTextConfig
from transformers.testing_utils import require_tf, slow
from transformers.utils import is_tf_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
if is_tf_available():
import tensorflow as tf
from transformers import TFBlipTextModel
from transformers.models.blip.modeling_tf_blip import TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST
class a__ :
def __init__( self, _UpperCAmelCase, _UpperCAmelCase=12, _UpperCAmelCase=7, _UpperCAmelCase=True, _UpperCAmelCase=True, _UpperCAmelCase=True, _UpperCAmelCase=99, _UpperCAmelCase=32, _UpperCAmelCase=32, _UpperCAmelCase=2, _UpperCAmelCase=4, _UpperCAmelCase=37, _UpperCAmelCase=0.1, _UpperCAmelCase=0.1, _UpperCAmelCase=512, _UpperCAmelCase=0.02, _UpperCAmelCase=0, _UpperCAmelCase=None, ):
'''simple docstring'''
lowercase__ = parent
lowercase__ = batch_size
lowercase__ = seq_length
lowercase__ = is_training
lowercase__ = use_input_mask
lowercase__ = use_labels
lowercase__ = vocab_size
lowercase__ = hidden_size
lowercase__ = projection_dim
lowercase__ = num_hidden_layers
lowercase__ = num_attention_heads
lowercase__ = intermediate_size
lowercase__ = dropout
lowercase__ = attention_dropout
lowercase__ = max_position_embeddings
lowercase__ = initializer_range
lowercase__ = scope
lowercase__ = bos_token_id
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = ids_tensor([self.batch_size, self.seq_length], self.vocab_size )
lowercase__ = None
if self.use_input_mask:
lowercase__ = random_attention_mask([self.batch_size, self.seq_length] )
if input_mask is not None:
lowercase__ = input_mask.numpy()
lowercase__ , lowercase__ = input_mask.shape
lowercase__ = np.random.randint(1, seq_length - 1, size=(batch_size,) )
for batch_idx, start_index in enumerate(_UpperCAmelCase ):
lowercase__ = 1
lowercase__ = 0
lowercase__ = self.get_config()
return config, input_ids, tf.convert_to_tensor(_UpperCAmelCase )
def snake_case__ ( self ):
'''simple docstring'''
return BlipTextConfig(
vocab_size=self.vocab_size, hidden_size=self.hidden_size, projection_dim=self.projection_dim, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, dropout=self.dropout, attention_dropout=self.attention_dropout, max_position_embeddings=self.max_position_embeddings, initializer_range=self.initializer_range, bos_token_id=self.bos_token_id, )
def snake_case__ ( self, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase ):
'''simple docstring'''
lowercase__ = TFBlipTextModel(config=_UpperCAmelCase )
lowercase__ = model(_UpperCAmelCase, attention_mask=_UpperCAmelCase, training=_UpperCAmelCase )
lowercase__ = model(_UpperCAmelCase, training=_UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size) )
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = self.prepare_config_and_inputs()
lowercase__ , lowercase__ , lowercase__ = config_and_inputs
lowercase__ = {"input_ids": input_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_tf
class a__ ( _a , unittest.TestCase ):
snake_case_ = (TFBlipTextModel,) if is_tf_available() else ()
snake_case_ = False
snake_case_ = False
snake_case_ = False
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = BlipTextModelTester(self )
lowercase__ = ConfigTester(self, config_class=_UpperCAmelCase, hidden_size=37 )
def snake_case__ ( self ):
'''simple docstring'''
self.config_tester.run_common_tests()
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_UpperCAmelCase )
def snake_case__ ( self ):
'''simple docstring'''
pass
def snake_case__ ( self ):
'''simple docstring'''
pass
@unittest.skip(reason="Blip does not use inputs_embeds" )
def snake_case__ ( self ):
'''simple docstring'''
pass
@unittest.skip(reason="BlipTextModel has no base class and is not available in MODEL_MAPPING" )
def snake_case__ ( self ):
'''simple docstring'''
pass
@unittest.skip(reason="BlipTextModel has no base class and is not available in MODEL_MAPPING" )
def snake_case__ ( self ):
'''simple docstring'''
pass
@slow
def snake_case__ ( self ):
'''simple docstring'''
for model_name in TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase__ = TFBlipTextModel.from_pretrained(_UpperCAmelCase )
self.assertIsNotNone(_UpperCAmelCase )
def snake_case__ ( self, _UpperCAmelCase=True ):
'''simple docstring'''
super().test_pt_tf_model_equivalence(allow_missing_keys=_UpperCAmelCase )
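# The mask construction in prepare_config_and_inputs guarantees every row keeps
# a non-empty prefix of real tokens: pick a random split point per row, ones
# before it, zeros after. A standalone numpy sketch of that pattern:
import numpy as np

def random_prefix_mask(batch_size: int, seq_length: int) -> np.ndarray:
    mask = np.zeros((batch_size, seq_length), dtype=np.int64)
    starts = np.random.randint(1, seq_length - 1, size=(batch_size,))
    for row, start in enumerate(starts):
        mask[row, :start] = 1  # attended positions
    return mask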
| 712 |
"""simple docstring"""
from math import ceil
from typing import List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import BatchFeature, SequenceFeatureExtractor
from ...utils import TensorType, logging
lowerCAmelCase_: Union[str, Any] = logging.get_logger(__name__)
class a__ ( _a ):
snake_case_ = ["audio_values", "audio_mask"]
def __init__( self, _UpperCAmelCase=2048, _UpperCAmelCase=1, _UpperCAmelCase=[16, 16], _UpperCAmelCase=128, _UpperCAmelCase=4_4100, _UpperCAmelCase=86, _UpperCAmelCase=2048, _UpperCAmelCase=0.0, **_UpperCAmelCase, ):
'''simple docstring'''
super().__init__(
feature_size=_UpperCAmelCase, sampling_rate=_UpperCAmelCase, padding_value=_UpperCAmelCase, **_UpperCAmelCase, )
lowercase__ = spectrogram_length
lowercase__ = num_channels
lowercase__ = patch_size
lowercase__ = feature_size // self.patch_size[1]
lowercase__ = n_fft
lowercase__ = sampling_rate // hop_length_to_sampling_rate
lowercase__ = sampling_rate
lowercase__ = padding_value
lowercase__ = mel_filter_bank(
num_frequency_bins=1 + n_fft // 2, num_mel_filters=_UpperCAmelCase, min_frequency=0.0, max_frequency=22_050.0, sampling_rate=_UpperCAmelCase, norm="slaney", mel_scale="slaney", ).T
def snake_case__ ( self, _UpperCAmelCase ):
'''simple docstring'''
lowercase__ = spectrogram(
_UpperCAmelCase, window_function(self.n_fft, "hann" ), frame_length=self.n_fft, hop_length=self.hop_length, power=2.0, mel_filters=self.mel_filters.T, log_mel="dB", db_range=80.0, )
lowercase__ = log_spec[:, :-1]
lowercase__ = log_spec - 20.0
lowercase__ = np.clip(log_spec / 40.0, -2.0, 0.0 ) + 1.0
return log_spec
def __call__( self, _UpperCAmelCase, _UpperCAmelCase = None, _UpperCAmelCase = True, _UpperCAmelCase = None, _UpperCAmelCase = False, _UpperCAmelCase = False, **_UpperCAmelCase, ):
'''simple docstring'''
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
"This feature extractor is set to support sampling rate"
F''' of {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled'''
F''' with {self.sampling_rate} and not {sampling_rate}.''' )
else:
logger.warning(
"It is strongly recommended to pass the `sampling_rate` argument to this function. "
"Failing to do so can result in silent errors that might be hard to debug." )
lowercase__ = isinstance(_UpperCAmelCase, np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(F'''Only mono-channel audio is supported for input to {self}''' )
lowercase__ = is_batched_numpy or (
isinstance(_UpperCAmelCase, (list, tuple) ) and (isinstance(raw_speech[0], (np.ndarray, tuple, list) ))
)
if is_batched:
lowercase__ = [np.asarray([speech], dtype=np.floataa ).T for speech in raw_speech]
elif not is_batched and not isinstance(_UpperCAmelCase, np.ndarray ):
lowercase__ = np.asarray(_UpperCAmelCase, dtype=np.floataa )
elif isinstance(_UpperCAmelCase, np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
lowercase__ = raw_speech.astype(np.floataa )
# always return batch
if not is_batched:
lowercase__ = [np.asarray([raw_speech] ).T]
# Convert audio signals to log mel spectrograms, truncate by time axis
lowercase__ = [
self._np_extract_fbank_features(waveform.squeeze() ).T[: self.spectrogram_length] for waveform in raw_speech
]
if isinstance(audio_features[0], _UpperCAmelCase ):
lowercase__ = [np.asarray(_UpperCAmelCase, dtype=np.floataa ) for feature in audio_features]
# Create audio attention mask
lowercase__ = max(
[ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len for feature in audio_features] ) # The maximum number of audio patches in a batch
if return_attention_mask:
lowercase__ = [
(ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [1]
+ (max_patch_len - ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [0]
for feature in audio_features
]
lowercase__ = np.array(_UpperCAmelCase ).astype(np.floataa )
# convert into correct format for padding
lowercase__ = max_patch_len // self.freq_len * self.patch_size[0] # The maximum audio size in a batch
lowercase__ = np.ones([len(_UpperCAmelCase ), 1, max_time_len, self.feature_size] ).astype(np.floataa )
lowercase__ = padded_audio_features * self.padding_value
for i in range(len(_UpperCAmelCase ) ):
lowercase__ = audio_features[i]
lowercase__ = feature
# return as BatchFeature
if return_attention_mask:
lowercase__ = {"audio_values": padded_audio_features, "audio_mask": audio_mask}
else:
lowercase__ = {"audio_values": padded_audio_features}
lowercase__ = BatchFeature(data=_UpperCAmelCase, tensor_type=_UpperCAmelCase )
return encoded_inputs
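# The normalisation inside _np_extract_fbank_features maps a dB-scale log-mel
# spectrogram into [-1, 1]: subtract 20 dB, divide by 40 dB, clip to [-2, 0],
# then shift by +1. A standalone sketch of just that arithmetic:
import numpy as np

def normalize_log_mel(log_spec_db: np.ndarray) -> np.ndarray:
    shifted = log_spec_db - 20.0
    return np.clip(shifted / 40.0, -2.0, 0.0) + 1.0

# 20 dB -> 1.0, -20 dB -> 0.0, and anything at or below -60 dB saturates at -1.0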
| 668 | 0 |
"""simple docstring"""
import argparse
import json
import os
import torch
from transformers import LukeConfig, LukeModel, LukeTokenizer, RobertaTokenizer
from transformers.tokenization_utils_base import AddedToken
@torch.no_grad()
def __a ( A , A , A , A , A ):
'''simple docstring'''
with open(A ) as metadata_file:
lowercase__ = json.load(A )
lowercase__ = LukeConfig(use_entity_aware_attention=A , **metadata["model_config"] )
# Load in the weights from the checkpoint_path
lowercase__ = torch.load(A , map_location="cpu" )
# Load the entity vocab file
lowercase__ = load_entity_vocab(A )
lowercase__ = RobertaTokenizer.from_pretrained(metadata["model_config"]["bert_model_name"] )
# Add special tokens to the token vocabulary for downstream tasks
lowercase__ = AddedToken("<ent>" , lstrip=A , rstrip=A )
lowercase__ = AddedToken("<ent2>" , lstrip=A , rstrip=A )
tokenizer.add_special_tokens({"additional_special_tokens": [entity_token_a, entity_token_a]} )
config.vocab_size += 2
print(f'''Saving tokenizer to {pytorch_dump_folder_path}''' )
tokenizer.save_pretrained(A )
with open(os.path.join(A , LukeTokenizer.vocab_files_names["entity_vocab_file"] ) , "w" ) as f:
json.dump(A , A )
lowercase__ = LukeTokenizer.from_pretrained(A )
# Initialize the embeddings of the special tokens
lowercase__ = state_dict["embeddings.word_embeddings.weight"]
lowercase__ = word_emb[tokenizer.convert_tokens_to_ids(["@"] )[0]].unsqueeze(0 )
lowercase__ = word_emb[tokenizer.convert_tokens_to_ids(["#"] )[0]].unsqueeze(0 )
lowercase__ = torch.cat([word_emb, ent_emb, enta_emb] )
# Initialize the query layers of the entity-aware self-attention mechanism
for layer_index in range(config.num_hidden_layers ):
for matrix_name in ["query.weight", "query.bias"]:
lowercase__ = f'''encoder.layer.{layer_index}.attention.self.'''
lowercase__ = state_dict[prefix + matrix_name]
lowercase__ = state_dict[prefix + matrix_name]
lowercase__ = state_dict[prefix + matrix_name]
# Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
lowercase__ = state_dict["entity_embeddings.entity_embeddings.weight"]
lowercase__ = entity_emb[entity_vocab["[MASK]"]]
lowercase__ = LukeModel(config=A ).eval()
lowercase__ , lowercase__ = model.load_state_dict(A , strict=A )
if not (len(A ) == 1 and missing_keys[0] == "embeddings.position_ids"):
raise ValueError(f'''Missing keys {', '.join(A )}. Expected only missing embeddings.position_ids''' )
if not (all(key.startswith("entity_predictions" ) or key.startswith("lm_head" ) for key in unexpected_keys )):
raise ValueError(
"Unexpected keys"
f''' {', '.join([key for key in unexpected_keys if not (key.startswith('entity_predictions' ) or key.startswith('lm_head' ))] )}''' )
# Check outputs
lowercase__ = LukeTokenizer.from_pretrained(A , task="entity_classification" )
lowercase__ = (
"Top seed Ana Ivanovic said on Thursday she could hardly believe her luck as a fortuitous netcord helped the"
" new world number one avoid a humiliating second- round exit at Wimbledon ."
)
lowercase__ = (39, 42)
lowercase__ = tokenizer(A , entity_spans=[span] , add_prefix_space=A , return_tensors="pt" )
lowercase__ = model(**A )
# Verify word hidden states
if model_size == "large":
lowercase__ = torch.Size((1, 42, 10_24) )
lowercase__ = torch.tensor(
[[0.0133, 0.0865, 0.0095], [0.3093, -0.2576, -0.7418], [-0.1720, -0.2117, -0.2869]] )
else: # base
lowercase__ = torch.Size((1, 42, 7_68) )
lowercase__ = torch.tensor([[0.0037, 0.1368, -0.0091], [0.1099, 0.3329, -0.1095], [0.0765, 0.5335, 0.1179]] )
if not (outputs.last_hidden_state.shape == expected_shape):
raise ValueError(
f'''Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}''' )
if not torch.allclose(outputs.last_hidden_state[0, :3, :3] , A , atol=1e-4 ):
raise ValueError
# Verify entity hidden states
if model_size == "large":
lowercase__ = torch.Size((1, 1, 10_24) )
lowercase__ = torch.tensor([[0.0466, -0.0106, -0.0179]] )
else: # base
lowercase__ = torch.Size((1, 1, 7_68) )
lowercase__ = torch.tensor([[0.1457, 0.1044, 0.0174]] )
    if not (outputs.entity_last_hidden_state.shape == expected_shape):
raise ValueError(
f'''Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is'''
f''' {expected_shape}''' )
if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3] , A , atol=1e-4 ):
raise ValueError
# Finally, save our PyTorch model and tokenizer
print("Saving PyTorch model to {}".format(A ) )
model.save_pretrained(A )
def __a ( A ):
'''simple docstring'''
lowercase__ = {}
with open(A , "r" , encoding="utf-8" ) as f:
for index, line in enumerate(A ):
lowercase__ , lowercase__ = line.rstrip().split("\t" )
lowercase__ = index
return entity_vocab
if __name__ == "__main__":
lowerCAmelCase_: List[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument("--checkpoint_path", type=str, help="Path to a pytorch_model.bin file.")
parser.add_argument(
"--metadata_path", default=None, type=str, help="Path to a metadata.json file, defining the configuration."
)
parser.add_argument(
"--entity_vocab_path",
default=None,
type=str,
help="Path to an entity_vocab.tsv file, containing the entity vocabulary.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to where to dump the output PyTorch model."
)
parser.add_argument(
"--model_size", default="base", type=str, choices=["base", "large"], help="Size of the model to be converted."
)
lowerCAmelCase_: str = parser.parse_args()
convert_luke_checkpoint(
args.checkpoint_path,
args.metadata_path,
args.entity_vocab_path,
args.pytorch_dump_folder_path,
args.model_size,
)
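# The embedding surgery above grows the word-embedding matrix by two rows (for
# <ent> and <ent2>), each seeded from an existing token's vector ('@' and '#').
# Standalone sketch of the pattern; shapes and seed indices are illustrative:
import torch

def extend_embedding_matrix(word_emb, seed_rows):
    new_rows = [word_emb[i].unsqueeze(0) for i in seed_rows]
    return torch.cat([word_emb, *new_rows], dim=0)

# emb = torch.randn(30522, 768)
# extend_embedding_matrix(emb, [1030, 1001]).shape -> torch.Size([30524, 768])
# config.vocab_size must then be bumped by len(seed_rows), as done above.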
| 713 |
"""simple docstring"""
from __future__ import annotations
import math
def __a ( A ):
'''simple docstring'''
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
    # All primes greater than 3 are of the form 6k +/- 1
for i in range(5 , int(math.sqrt(A ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
lowerCAmelCase_: Optional[Any] = [num for num in range(3, 1_0_0_0_0_1, 2) if not is_prime(num)]
def __a ( A ):
'''simple docstring'''
if not isinstance(A , A ):
raise ValueError("n must be an integer" )
if n <= 0:
raise ValueError("n must be >= 0" )
lowercase__ = []
for num in range(len(A ) ):
lowercase__ = 0
while 2 * i * i <= odd_composites[num]:
lowercase__ = odd_composites[num] - 2 * i * i
if is_prime(A ):
break
i += 1
else:
list_nums.append(odd_composites[num] )
if len(A ) == n:
return list_nums
return []
def __a ( ):
'''simple docstring'''
return compute_nums(1 )[0]
if __name__ == "__main__":
print(F'{solution() = }')
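# Restating the search directly: an odd composite n refutes the conjecture iff
# no k >= 1 yields a prime n - 2*k*k. The published Project Euler 46 answer is
# 5777; this standalone sketch re-derives the check with its own primality test.
def _is_prime_trial(n: int) -> bool:
    if n < 2:
        return False
    d = 2
    while d * d <= n:
        if n % d == 0:
            return False
        d += 1
    return True

def refutes_conjecture(n: int) -> bool:
    k = 1
    while 2 * k * k < n:
        if _is_prime_trial(n - 2 * k * k):
            return False
        k += 1
    return True

# refutes_conjecture(5777) -> True; every smaller odd composite returns False.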
| 668 | 0 |
"""simple docstring"""
from collections import namedtuple
lowerCAmelCase_: List[str] = namedtuple("from_to", "from_ to")
lowerCAmelCase_: int = {
"cubicmeter": from_to(1, 1),
"litre": from_to(0.001, 1_0_0_0),
"kilolitre": from_to(1, 1),
"gallon": from_to(0.00_454, 264.172),
"cubicyard": from_to(0.76_455, 1.30_795),
"cubicfoot": from_to(0.028, 35.3_147),
"cup": from_to(0.000_236_588, 4_2_2_6.7_5),
}
def __a ( A , A , A ):
'''simple docstring'''
if from_type not in METRIC_CONVERSION:
raise ValueError(
            f'''Invalid \'from_type\' value: {from_type!r}. Supported values are:\n'''
+ ", ".join(A ) )
if to_type not in METRIC_CONVERSION:
raise ValueError(
f'''Invalid \'to_type\' value: {to_type!r}. Supported values are:\n'''
+ ", ".join(A ) )
return value * METRIC_CONVERSION[from_type].from_ * METRIC_CONVERSION[to_type].to
if __name__ == "__main__":
import doctest
doctest.testmod()
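# The conversion pipes everything through cubic metres: multiply by the source
# unit's `from_` factor to reach m^3, then by the target unit's `to` factor.
# Note a quirk of the table above: gallon's from_ (0.00454 m^3, roughly the
# imperial gallon) and its to (264.172 gal/m^3, the US figure) do not agree,
# so a gallon -> gallon round trip returns 1 * 0.00454 * 264.172 ~ 1.199, not 1.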
| 714 |
"""simple docstring"""
import os
import sys
lowerCAmelCase_: Any = os.path.join(os.path.dirname(__file__), "src")
sys.path.append(SRC_DIR)
from transformers import (
AutoConfig,
AutoModel,
AutoModelForCausalLM,
AutoModelForMaskedLM,
AutoModelForQuestionAnswering,
AutoModelForSequenceClassification,
AutoTokenizer,
add_start_docstrings,
)
lowerCAmelCase_: Union[str, Any] = [
"torch",
"numpy",
"tokenizers",
"filelock",
"requests",
"tqdm",
"regex",
"sentencepiece",
"sacremoses",
"importlib_metadata",
"huggingface_hub",
]
@add_start_docstrings(AutoConfig.__doc__ )
def __a ( *A , **A ):
'''simple docstring'''
return AutoConfig.from_pretrained(*A , **A )
@add_start_docstrings(AutoTokenizer.__doc__ )
def __a ( *A , **A ):
'''simple docstring'''
return AutoTokenizer.from_pretrained(*A , **A )
@add_start_docstrings(AutoModel.__doc__ )
def __a ( *A , **A ):
'''simple docstring'''
return AutoModel.from_pretrained(*A , **A )
@add_start_docstrings(AutoModelForCausalLM.__doc__ )
def __a ( *A , **A ):
'''simple docstring'''
return AutoModelForCausalLM.from_pretrained(*A , **A )
@add_start_docstrings(AutoModelForMaskedLM.__doc__ )
def __a ( *A , **A ):
'''simple docstring'''
return AutoModelForMaskedLM.from_pretrained(*A , **A )
@add_start_docstrings(AutoModelForSequenceClassification.__doc__ )
def __a ( *A , **A ):
'''simple docstring'''
return AutoModelForSequenceClassification.from_pretrained(*A , **A )
@add_start_docstrings(AutoModelForQuestionAnswering.__doc__ )
def __a ( *A , **A ):
'''simple docstring'''
return AutoModelForQuestionAnswering.from_pretrained(*A , **A )
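# add_start_docstrings works by prepending the given docstrings onto the
# wrapped callable's own __doc__, which is how each shortcut above inherits the
# documentation of the class it forwards to. A minimal sketch of such a
# decorator (assumed to mirror the real helper, which may differ in detail):
def _add_start_docstrings_sketch(*docstr):
    def decorator(fn):
        fn.__doc__ = "".join(docstr) + (fn.__doc__ or "")
        return fn
    return decorator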
| 668 | 0 |
"""simple docstring"""
from __future__ import annotations
def __a ( A , A , A ):
'''simple docstring'''
lowercase__ = list(range(len(A ) ) )
lowercase__ = [v / w for v, w in zip(A , A )]
    index.sort(key=lambda i: ratio[i] , reverse=True )  # highest value/weight first
lowercase__ = 0
lowercase__ = [0] * len(A )
for i in index:
if weight[i] <= capacity:
lowercase__ = 1
max_value += value[i]
capacity -= weight[i]
else:
lowercase__ = capacity / weight[i]
max_value += value[i] * capacity / weight[i]
break
return max_value, fractions
if __name__ == "__main__":
import doctest
doctest.testmod()
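# Worked example for the greedy fill above: items are taken whole in decreasing
# value/weight order and only the final item is split.
# value = [10, 9, 11]; weight = [2, 3, 4]; capacity = 6
# ratios 5.0, 3.0, 2.75 -> item 0 whole, item 1 whole, then 1/4 of item 2
# -> max_value = 10 + 9 + 11 / 4 = 21.75, fractions = [1, 1, 0.25]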
| 715 |
"""simple docstring"""
import unittest
from transformers import AutoConfig, AutoTokenizer, BertConfig, TensorType, is_flax_available
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, require_flax, slow
if is_flax_available():
import jax
from transformers.models.auto.modeling_flax_auto import FlaxAutoModel
from transformers.models.bert.modeling_flax_bert import FlaxBertModel
from transformers.models.roberta.modeling_flax_roberta import FlaxRobertaModel
@require_flax
class a__ ( unittest.TestCase ):
@slow
def snake_case__ ( self ):
'''simple docstring'''
for model_name in ["bert-base-cased", "bert-large-uncased"]:
with self.subTest(_UpperCAmelCase ):
lowercase__ = AutoConfig.from_pretrained(_UpperCAmelCase )
self.assertIsNotNone(_UpperCAmelCase )
self.assertIsInstance(_UpperCAmelCase, _UpperCAmelCase )
lowercase__ = FlaxAutoModel.from_pretrained(_UpperCAmelCase )
self.assertIsNotNone(_UpperCAmelCase )
self.assertIsInstance(_UpperCAmelCase, _UpperCAmelCase )
@slow
def snake_case__ ( self ):
'''simple docstring'''
for model_name in ["roberta-base", "roberta-large"]:
with self.subTest(_UpperCAmelCase ):
lowercase__ = AutoConfig.from_pretrained(_UpperCAmelCase )
self.assertIsNotNone(_UpperCAmelCase )
self.assertIsInstance(_UpperCAmelCase, _UpperCAmelCase )
lowercase__ = FlaxAutoModel.from_pretrained(_UpperCAmelCase )
self.assertIsNotNone(_UpperCAmelCase )
self.assertIsInstance(_UpperCAmelCase, _UpperCAmelCase )
@slow
def snake_case__ ( self ):
'''simple docstring'''
for model_name in ["bert-base-cased", "bert-large-uncased"]:
lowercase__ = AutoTokenizer.from_pretrained(_UpperCAmelCase )
lowercase__ = FlaxBertModel.from_pretrained(_UpperCAmelCase )
lowercase__ = tokenizer("Do you support jax jitted function?", return_tensors=TensorType.JAX )
@jax.jit
def eval(**_UpperCAmelCase ):
return model(**_UpperCAmelCase )
eval(**_UpperCAmelCase ).block_until_ready()
@slow
def snake_case__ ( self ):
'''simple docstring'''
for model_name in ["roberta-base", "roberta-large"]:
lowercase__ = AutoTokenizer.from_pretrained(_UpperCAmelCase )
lowercase__ = FlaxRobertaModel.from_pretrained(_UpperCAmelCase )
lowercase__ = tokenizer("Do you support jax jitted function?", return_tensors=TensorType.JAX )
@jax.jit
def eval(**_UpperCAmelCase ):
return model(**_UpperCAmelCase )
eval(**_UpperCAmelCase ).block_until_ready()
def snake_case__ ( self ):
'''simple docstring'''
with self.assertRaisesRegex(
_UpperCAmelCase, "bert-base is not a local folder and is not a valid model identifier" ):
lowercase__ = FlaxAutoModel.from_pretrained("bert-base" )
def snake_case__ ( self ):
'''simple docstring'''
with self.assertRaisesRegex(
_UpperCAmelCase, R"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)" ):
lowercase__ = FlaxAutoModel.from_pretrained(_UpperCAmelCase, revision="aaaaaa" )
def snake_case__ ( self ):
'''simple docstring'''
with self.assertRaisesRegex(
_UpperCAmelCase, "hf-internal-testing/config-no-model does not appear to have a file named flax_model.msgpack", ):
lowercase__ = FlaxAutoModel.from_pretrained("hf-internal-testing/config-no-model" )
def snake_case__ ( self ):
'''simple docstring'''
with self.assertRaisesRegex(_UpperCAmelCase, "Use `from_pt=True` to load this model" ):
lowercase__ = FlaxAutoModel.from_pretrained("hf-internal-testing/tiny-bert-pt-only" )
| 668 | 0 |
"""simple docstring"""
import random
import timeit
from functools import wraps
from typing import Callable, Optional
from ..configuration_utils import PretrainedConfig
from ..models.auto.modeling_tf_auto import TF_MODEL_MAPPING, TF_MODEL_WITH_LM_HEAD_MAPPING
from ..utils import is_pyanvml_available, is_tf_available, logging
from .benchmark_utils import (
Benchmark,
Memory,
MemorySummary,
measure_peak_memory_cpu,
start_memory_tracing,
stop_memory_tracing,
)
if is_tf_available():
import tensorflow as tf
from tensorflow.python.framework.errors_impl import ResourceExhaustedError
from .benchmark_args_tf import TensorFlowBenchmarkArguments
if is_pyanvml_available():
import pyanvml.pyanvml as nvml
lowerCAmelCase_: Dict = logging.get_logger(__name__)
def __a ( A , A ):
'''simple docstring'''
def run_func(A ):
@wraps(A )
def run_in_eager_mode(*A , **A ):
return func(*A , **A )
@wraps(A )
@tf.function(experimental_compile=A )
def run_in_graph_mode(*A , **A ):
return func(*A , **A )
if do_eager_mode is True:
if use_xla is not False:
raise ValueError(
"Cannot run model in XLA, if `args.eager_mode` is set to `True`. Please set `args.eager_mode=False`." )
return run_in_eager_mode
else:
return run_in_graph_mode
return run_func
def __a ( A , A , A ):
'''simple docstring'''
lowercase__ = random.Random()
lowercase__ = [rng.randint(0 , vocab_size - 1 ) for i in range(batch_size * sequence_length )]
return tf.constant(A , shape=(batch_size, sequence_length) , dtype=tf.intaa )
class a__ ( _a ):
snake_case_ = 42
snake_case_ = 42
snake_case_ = "TensorFlow"
@property
def snake_case__ ( self ):
'''simple docstring'''
return tf.__version__
def snake_case__ ( self, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase ):
'''simple docstring'''
lowercase__ = self.args.strategy
if strategy is None:
raise ValueError("A device strategy has to be initialized before using TensorFlow." )
lowercase__ = self._prepare_inference_func(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase )
return self._measure_speed(_inference )
def snake_case__ ( self, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase ):
'''simple docstring'''
lowercase__ = self.args.strategy
if strategy is None:
raise ValueError("A device strategy has to be initialized before using TensorFlow." )
lowercase__ = self._prepare_train_func(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase )
return self._measure_speed(_train )
def snake_case__ ( self, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase ):
'''simple docstring'''
if self.args.is_gpu:
tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx], _UpperCAmelCase )
lowercase__ = self.args.strategy
if strategy is None:
raise ValueError("A device strategy has to be initialized before using TensorFlow." )
lowercase__ = self._prepare_inference_func(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase )
return self._measure_memory(_inference )
def snake_case__ ( self, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase ):
'''simple docstring'''
if self.args.is_gpu:
tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx], _UpperCAmelCase )
lowercase__ = self.args.strategy
if strategy is None:
raise ValueError("A device strategy has to be initialized before using TensorFlow." )
lowercase__ = self._prepare_train_func(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase )
return self._measure_memory(_train )
def snake_case__ ( self, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase ):
'''simple docstring'''
lowercase__ = self.config_dict[model_name]
if self.args.fpaa:
raise NotImplementedError("Mixed precision is currently not supported." )
lowercase__ = (
hasattr(_UpperCAmelCase, "architectures" )
and isinstance(config.architectures, _UpperCAmelCase )
and len(config.architectures ) > 0
)
if not self.args.only_pretrain_model and has_model_class_in_config:
try:
lowercase__ = "TF" + config.architectures[0] # prepend 'TF' for tensorflow model
lowercase__ = __import__("transformers", fromlist=[model_class] )
lowercase__ = getattr(_UpperCAmelCase, _UpperCAmelCase )
lowercase__ = model_cls(_UpperCAmelCase )
except ImportError:
raise ImportError(
F'''{model_class} does not exist. If you just want to test the pretrained model, you might want to'''
" set `--only_pretrain_model` or `args.only_pretrain_model=True`." )
else:
lowercase__ = TF_MODEL_MAPPING[config.__class__](_UpperCAmelCase )
# encoder-decoder has vocab size saved differently
lowercase__ = config.vocab_size if hasattr(_UpperCAmelCase, "vocab_size" ) else config.encoder.vocab_size
lowercase__ = random_input_ids(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase )
@run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla )
def encoder_decoder_forward():
return model(_UpperCAmelCase, decoder_input_ids=_UpperCAmelCase, training=_UpperCAmelCase )
@run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla )
def encoder_forward():
return model(_UpperCAmelCase, training=_UpperCAmelCase )
lowercase__ = encoder_decoder_forward if config.is_encoder_decoder else encoder_forward
return _inference
def snake_case__ ( self, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase ):
'''simple docstring'''
lowercase__ = self.config_dict[model_name]
if self.args.eager_mode is not False:
raise ValueError("Training cannot be done in eager mode. Please make sure that `args.eager_mode = False`." )
if self.args.fpaa:
raise NotImplementedError("Mixed precision is currently not supported." )
lowercase__ = (
hasattr(_UpperCAmelCase, "architectures" )
and isinstance(config.architectures, _UpperCAmelCase )
and len(config.architectures ) > 0
)
if not self.args.only_pretrain_model and has_model_class_in_config:
try:
lowercase__ = "TF" + config.architectures[0] # prepend 'TF' for tensorflow model
lowercase__ = __import__("transformers", fromlist=[model_class] )
lowercase__ = getattr(_UpperCAmelCase, _UpperCAmelCase )
lowercase__ = model_cls(_UpperCAmelCase )
except ImportError:
raise ImportError(
F'''{model_class} does not exist. If you just want to test the pretrained model, you might want to'''
" set `--only_pretrain_model` or `args.only_pretrain_model=True`." )
else:
lowercase__ = TF_MODEL_WITH_LM_HEAD_MAPPING[config.__class__](_UpperCAmelCase )
# encoder-decoder has vocab size saved differently
lowercase__ = config.vocab_size if hasattr(_UpperCAmelCase, "vocab_size" ) else config.encoder.vocab_size
lowercase__ = random_input_ids(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase )
@run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla )
def encoder_decoder_train():
lowercase__ = model(_UpperCAmelCase, decoder_input_ids=_UpperCAmelCase, labels=_UpperCAmelCase, training=_UpperCAmelCase )[0]
lowercase__ = tf.gradients(_UpperCAmelCase, model.trainable_variables )
return gradients
@run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla )
def encoder_train():
lowercase__ = model(_UpperCAmelCase, labels=_UpperCAmelCase, training=_UpperCAmelCase )[0]
lowercase__ = tf.gradients(_UpperCAmelCase, model.trainable_variables )
return gradients
lowercase__ = encoder_decoder_train if config.is_encoder_decoder else encoder_train
return _train
def snake_case__ ( self, _UpperCAmelCase ):
'''simple docstring'''
with self.args.strategy.scope():
try:
if self.args.is_tpu or self.args.use_xla:
                    # run the model an additional 5 times to stabilize compilation for tpu
logger.info("Do inference on TPU. Running model 5 times to stabilize compilation" )
timeit.repeat(_UpperCAmelCase, repeat=1, number=5 )
# as written in https://docs.python.org/2/library/timeit.html#timeit.Timer.repeat, min should be taken rather than the average
lowercase__ = timeit.repeat(
_UpperCAmelCase, repeat=self.args.repeat, number=10, )
return min(_UpperCAmelCase ) / 10.0
except ResourceExhaustedError as e:
self.print_fn(F'''Doesn\'t fit on GPU. {e}''' )
def snake_case__ ( self, _UpperCAmelCase ):
'''simple docstring'''
logger.info(
"Note that TensorFlow allocates more memory than "
"it might need to speed up computation. "
"The memory reported here corresponds to the memory "
"reported by `nvidia-smi`, which can vary depending "
"on total available memory on the GPU that is used." )
with self.args.strategy.scope():
try:
if self.args.trace_memory_line_by_line:
if not self.args.eager_mode:
raise ValueError(
"`args.eager_mode` is set to `False`. Make sure to run model in eager mode to measure memory"
" consumption line by line." )
lowercase__ = start_memory_tracing("transformers" )
if self.args.is_tpu:
# tpu
raise NotImplementedError(
"Memory Benchmarking is currently not implemented for TPU. Please disable memory benchmarking"
" with `args.memory=False`" )
elif self.args.is_gpu:
# gpu
if not is_pyanvml_available():
logger.warning(
"py3nvml not installed, we won't log GPU memory usage. "
"Install py3nvml (pip install py3nvml) to log information about GPU." )
lowercase__ = "N/A"
else:
logger.info(
"Measuring total GPU usage on GPU device. Make sure to not have additional processes"
" running on the same GPU." )
# init nvml
nvml.nvmlInit()
func()
lowercase__ = nvml.nvmlDeviceGetHandleByIndex(self.args.device_idx )
lowercase__ = nvml.nvmlDeviceGetMemoryInfo(_UpperCAmelCase )
lowercase__ = meminfo.used
lowercase__ = Memory(_UpperCAmelCase )
# shutdown nvml
nvml.nvmlShutdown()
else:
# cpu
if self.args.trace_memory_line_by_line:
logger.info(
"When enabling line by line tracing, the max peak memory for CPU is inaccurate in"
" TensorFlow." )
lowercase__ = None
else:
lowercase__ = measure_peak_memory_cpu(_UpperCAmelCase )
lowercase__ = Memory(_UpperCAmelCase ) if isinstance(_UpperCAmelCase, _UpperCAmelCase ) else memory_bytes
if self.args.trace_memory_line_by_line:
lowercase__ = stop_memory_tracing(_UpperCAmelCase )
if memory is None:
lowercase__ = summary.total
else:
lowercase__ = None
return memory, summary
except ResourceExhaustedError as e:
self.print_fn(F'''Doesn\'t fit on GPU. {e}''' )
return "N/A", None
| 716 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCAmelCase_: str = logging.get_logger(__name__)
lowerCAmelCase_: List[Any] = {
"facebook/data2vec-vision-base-ft": (
"https://huggingface.co/facebook/data2vec-vision-base-ft/resolve/main/config.json"
),
}
class a__ ( _a ):
snake_case_ = "data2vec-vision"
def __init__( self, _UpperCAmelCase=768, _UpperCAmelCase=12, _UpperCAmelCase=12, _UpperCAmelCase=3072, _UpperCAmelCase="gelu", _UpperCAmelCase=0.0, _UpperCAmelCase=0.0, _UpperCAmelCase=0.02, _UpperCAmelCase=1E-12, _UpperCAmelCase=224, _UpperCAmelCase=16, _UpperCAmelCase=3, _UpperCAmelCase=False, _UpperCAmelCase=False, _UpperCAmelCase=False, _UpperCAmelCase=False, _UpperCAmelCase=0.1, _UpperCAmelCase=0.1, _UpperCAmelCase=True, _UpperCAmelCase=[3, 5, 7, 11], _UpperCAmelCase=[1, 2, 3, 6], _UpperCAmelCase=True, _UpperCAmelCase=0.4, _UpperCAmelCase=256, _UpperCAmelCase=1, _UpperCAmelCase=False, _UpperCAmelCase=255, **_UpperCAmelCase, ):
'''simple docstring'''
super().__init__(**_UpperCAmelCase )
lowercase__ = hidden_size
lowercase__ = num_hidden_layers
lowercase__ = num_attention_heads
lowercase__ = intermediate_size
lowercase__ = hidden_act
lowercase__ = hidden_dropout_prob
lowercase__ = attention_probs_dropout_prob
lowercase__ = initializer_range
lowercase__ = layer_norm_eps
lowercase__ = image_size
lowercase__ = patch_size
lowercase__ = num_channels
lowercase__ = use_mask_token
lowercase__ = use_absolute_position_embeddings
lowercase__ = use_relative_position_bias
lowercase__ = use_shared_relative_position_bias
lowercase__ = layer_scale_init_value
lowercase__ = drop_path_rate
lowercase__ = use_mean_pooling
# decode head attributes (semantic segmentation)
lowercase__ = out_indices
lowercase__ = pool_scales
# auxiliary head attributes (semantic segmentation)
lowercase__ = use_auxiliary_head
lowercase__ = auxiliary_loss_weight
lowercase__ = auxiliary_channels
lowercase__ = auxiliary_num_convs
lowercase__ = auxiliary_concat_input
lowercase__ = semantic_loss_ignore_index
class a__ ( _a ):
snake_case_ = version.parse("1.11" )
@property
def snake_case__ ( self ):
'''simple docstring'''
return OrderedDict(
[
("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
] )
@property
def snake_case__ ( self ):
'''simple docstring'''
return 1E-4
| 668 | 0 |
"""simple docstring"""
from math import ceil
from typing import List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import BatchFeature, SequenceFeatureExtractor
from ...utils import TensorType, logging
lowerCAmelCase_: Union[str, Any] = logging.get_logger(__name__)
class a__ ( _a ):
snake_case_ = ["audio_values", "audio_mask"]
def __init__( self, _UpperCAmelCase=2048, _UpperCAmelCase=1, _UpperCAmelCase=[16, 16], _UpperCAmelCase=128, _UpperCAmelCase=4_4100, _UpperCAmelCase=86, _UpperCAmelCase=2048, _UpperCAmelCase=0.0, **_UpperCAmelCase, ):
'''simple docstring'''
super().__init__(
feature_size=_UpperCAmelCase, sampling_rate=_UpperCAmelCase, padding_value=_UpperCAmelCase, **_UpperCAmelCase, )
lowercase__ = spectrogram_length
lowercase__ = num_channels
lowercase__ = patch_size
lowercase__ = feature_size // self.patch_size[1]
lowercase__ = n_fft
lowercase__ = sampling_rate // hop_length_to_sampling_rate
lowercase__ = sampling_rate
lowercase__ = padding_value
lowercase__ = mel_filter_bank(
            num_frequency_bins=1 + n_fft // 2, num_mel_filters=_UpperCAmelCase, min_frequency=0.0, max_frequency=22_050.0, sampling_rate=_UpperCAmelCase, norm="slaney", mel_scale="slaney", ).T
def snake_case__ ( self, _UpperCAmelCase ):
'''simple docstring'''
lowercase__ = spectrogram(
_UpperCAmelCase, window_function(self.n_fft, "hann" ), frame_length=self.n_fft, hop_length=self.hop_length, power=2.0, mel_filters=self.mel_filters.T, log_mel="dB", db_range=80.0, )
lowercase__ = log_spec[:, :-1]
lowercase__ = log_spec - 20.0
lowercase__ = np.clip(log_spec / 40.0, -2.0, 0.0 ) + 1.0
return log_spec
def __call__( self, _UpperCAmelCase, _UpperCAmelCase = None, _UpperCAmelCase = True, _UpperCAmelCase = None, _UpperCAmelCase = False, _UpperCAmelCase = False, **_UpperCAmelCase, ):
'''simple docstring'''
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
"This feature extractor is set to support sampling rate"
F''' of {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled'''
F''' with {self.sampling_rate} and not {sampling_rate}.''' )
else:
logger.warning(
"It is strongly recommended to pass the `sampling_rate` argument to this function. "
"Failing to do so can result in silent errors that might be hard to debug." )
lowercase__ = isinstance(_UpperCAmelCase, np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(F'''Only mono-channel audio is supported for input to {self}''' )
lowercase__ = is_batched_numpy or (
isinstance(_UpperCAmelCase, (list, tuple) ) and (isinstance(raw_speech[0], (np.ndarray, tuple, list) ))
)
if is_batched:
lowercase__ = [np.asarray([speech], dtype=np.floataa ).T for speech in raw_speech]
elif not is_batched and not isinstance(_UpperCAmelCase, np.ndarray ):
lowercase__ = np.asarray(_UpperCAmelCase, dtype=np.floataa )
elif isinstance(_UpperCAmelCase, np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
lowercase__ = raw_speech.astype(np.floataa )
# always return batch
if not is_batched:
lowercase__ = [np.asarray([raw_speech] ).T]
# Convert audio signals to log mel spectrograms, truncate by time axis
lowercase__ = [
self._np_extract_fbank_features(waveform.squeeze() ).T[: self.spectrogram_length] for waveform in raw_speech
]
if isinstance(audio_features[0], _UpperCAmelCase ):
lowercase__ = [np.asarray(_UpperCAmelCase, dtype=np.floataa ) for feature in audio_features]
# Create audio attention mask
lowercase__ = max(
[ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len for feature in audio_features] ) # The maximum number of audio patches in a batch
if return_attention_mask:
lowercase__ = [
(ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [1]
+ (max_patch_len - ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [0]
for feature in audio_features
]
lowercase__ = np.array(_UpperCAmelCase ).astype(np.floataa )
# convert into correct format for padding
lowercase__ = max_patch_len // self.freq_len * self.patch_size[0] # The maximum audio size in a batch
lowercase__ = np.ones([len(_UpperCAmelCase ), 1, max_time_len, self.feature_size] ).astype(np.floataa )
lowercase__ = padded_audio_features * self.padding_value
for i in range(len(_UpperCAmelCase ) ):
lowercase__ = audio_features[i]
lowercase__ = feature
# return as BatchFeature
if return_attention_mask:
lowercase__ = {"audio_values": padded_audio_features, "audio_mask": audio_mask}
else:
lowercase__ = {"audio_values": padded_audio_features}
lowercase__ = BatchFeature(data=_UpperCAmelCase, tensor_type=_UpperCAmelCase )
return encoded_inputs
| 717 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase_: List[Any] = logging.get_logger(__name__)
lowerCAmelCase_: int = {
"microsoft/markuplm-base": "https://huggingface.co/microsoft/markuplm-base/resolve/main/config.json",
"microsoft/markuplm-large": "https://huggingface.co/microsoft/markuplm-large/resolve/main/config.json",
}
class a__ ( _a ):
snake_case_ = "markuplm"
def __init__( self, _UpperCAmelCase=3_0522, _UpperCAmelCase=768, _UpperCAmelCase=12, _UpperCAmelCase=12, _UpperCAmelCase=3072, _UpperCAmelCase="gelu", _UpperCAmelCase=0.1, _UpperCAmelCase=0.1, _UpperCAmelCase=512, _UpperCAmelCase=2, _UpperCAmelCase=0.02, _UpperCAmelCase=1E-12, _UpperCAmelCase=0, _UpperCAmelCase=0, _UpperCAmelCase=2, _UpperCAmelCase=256, _UpperCAmelCase=1024, _UpperCAmelCase=216, _UpperCAmelCase=1001, _UpperCAmelCase=32, _UpperCAmelCase=50, _UpperCAmelCase="absolute", _UpperCAmelCase=True, _UpperCAmelCase=None, **_UpperCAmelCase, ):
'''simple docstring'''
super().__init__(
pad_token_id=_UpperCAmelCase, bos_token_id=_UpperCAmelCase, eos_token_id=_UpperCAmelCase, **_UpperCAmelCase, )
lowercase__ = vocab_size
lowercase__ = hidden_size
lowercase__ = num_hidden_layers
lowercase__ = num_attention_heads
lowercase__ = hidden_act
lowercase__ = intermediate_size
lowercase__ = hidden_dropout_prob
lowercase__ = attention_probs_dropout_prob
lowercase__ = max_position_embeddings
lowercase__ = type_vocab_size
lowercase__ = initializer_range
lowercase__ = layer_norm_eps
lowercase__ = position_embedding_type
lowercase__ = use_cache
lowercase__ = classifier_dropout
# additional properties
lowercase__ = max_depth
lowercase__ = max_xpath_tag_unit_embeddings
lowercase__ = max_xpath_subs_unit_embeddings
lowercase__ = tag_pad_id
lowercase__ = subs_pad_id
lowercase__ = xpath_unit_hidden_size
| 668 | 0 |
"""simple docstring"""
import platform
from argparse import ArgumentParser
import huggingface_hub
from .. import __version__ as version
from ..utils import is_accelerate_available, is_torch_available, is_transformers_available, is_xformers_available
from . import BaseDiffusersCLICommand
def __a ( A ):
'''simple docstring'''
return EnvironmentCommand()
class a__ ( _a ):
@staticmethod
def snake_case__ ( _UpperCAmelCase ):
'''simple docstring'''
lowercase__ = parser.add_parser("env" )
download_parser.set_defaults(func=_UpperCAmelCase )
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = huggingface_hub.__version__
lowercase__ = "not installed"
lowercase__ = "NA"
if is_torch_available():
import torch
lowercase__ = torch.__version__
lowercase__ = torch.cuda.is_available()
lowercase__ = "not installed"
if is_transformers_available():
import transformers
lowercase__ = transformers.__version__
lowercase__ = "not installed"
if is_accelerate_available():
import accelerate
lowercase__ = accelerate.__version__
lowercase__ = "not installed"
if is_xformers_available():
import xformers
lowercase__ = xformers.__version__
lowercase__ = {
"`diffusers` version": version,
"Platform": platform.platform(),
"Python version": platform.python_version(),
"PyTorch version (GPU?)": F'''{pt_version} ({pt_cuda_available})''',
"Huggingface_hub version": hub_version,
"Transformers version": transformers_version,
"Accelerate version": accelerate_version,
"xFormers version": xformers_version,
"Using GPU in script?": "<fill in>",
"Using distributed or parallel set-up in script?": "<fill in>",
}
print("\nCopy-and-paste the text below in your GitHub issue and FILL OUT the two last points.\n" )
print(self.format_dict(_UpperCAmelCase ) )
return info
@staticmethod
def snake_case__ ( _UpperCAmelCase ):
'''simple docstring'''
return "\n".join([F'''- {prop}: {val}''' for prop, val in d.items()] ) + "\n"
| 718 |
"""simple docstring"""
lowerCAmelCase_: Union[str, Any] = [
9_9_9,
8_0_0,
7_9_9,
6_0_0,
5_9_9,
5_0_0,
4_0_0,
3_9_9,
3_7_7,
3_5_5,
3_3_3,
3_1_1,
2_8_8,
2_6_6,
2_4_4,
2_2_2,
2_0_0,
1_9_9,
1_7_7,
1_5_5,
1_3_3,
1_1_1,
8_8,
6_6,
4_4,
2_2,
0,
]
lowerCAmelCase_: List[str] = [
9_9_9,
9_7_6,
9_5_2,
9_2_8,
9_0_5,
8_8_2,
8_5_8,
8_5_7,
8_1_0,
7_6_2,
7_1_5,
7_1_4,
5_7_2,
4_2_9,
4_2_8,
2_8_6,
2_8_5,
2_3_8,
1_9_0,
1_4_3,
1_4_2,
1_1_8,
9_5,
7_1,
4_7,
2_4,
0,
]
lowerCAmelCase_: List[str] = [
9_9_9,
9_8_8,
9_7_7,
9_6_6,
9_5_5,
9_4_4,
9_3_3,
9_2_2,
9_1_1,
9_0_0,
8_9_9,
8_7_9,
8_5_9,
8_4_0,
8_2_0,
8_0_0,
7_9_9,
7_6_6,
7_3_3,
7_0_0,
6_9_9,
6_5_0,
6_0_0,
5_9_9,
5_0_0,
4_9_9,
4_0_0,
3_9_9,
3_5_0,
3_0_0,
2_9_9,
2_6_6,
2_3_3,
2_0_0,
1_9_9,
1_7_9,
1_5_9,
1_4_0,
1_2_0,
1_0_0,
9_9,
8_8,
7_7,
6_6,
5_5,
4_4,
3_3,
2_2,
1_1,
0,
]
lowerCAmelCase_: Dict = [
9_9_9,
9_9_5,
9_9_2,
9_8_9,
9_8_5,
9_8_1,
9_7_8,
9_7_5,
9_7_1,
9_6_7,
9_6_4,
9_6_1,
9_5_7,
9_5_6,
9_5_1,
9_4_7,
9_4_2,
9_3_7,
9_3_3,
9_2_8,
9_2_3,
9_1_9,
9_1_4,
9_1_3,
9_0_8,
9_0_3,
8_9_7,
8_9_2,
8_8_7,
8_8_1,
8_7_6,
8_7_1,
8_7_0,
8_6_4,
8_5_8,
8_5_2,
8_4_6,
8_4_0,
8_3_4,
8_2_8,
8_2_7,
8_2_0,
8_1_3,
8_0_6,
7_9_9,
7_9_2,
7_8_5,
7_8_4,
7_7_7,
7_7_0,
7_6_3,
7_5_6,
7_4_9,
7_4_2,
7_4_1,
7_3_3,
7_2_4,
7_1_6,
7_0_7,
6_9_9,
6_9_8,
6_8_8,
6_7_7,
6_6_6,
6_5_6,
6_5_5,
6_4_5,
6_3_4,
6_2_3,
6_1_3,
6_1_2,
5_9_8,
5_8_4,
5_7_0,
5_6_9,
5_5_5,
5_4_1,
5_2_7,
5_2_6,
5_0_5,
4_8_4,
4_8_3,
4_6_2,
4_4_0,
4_3_9,
3_9_6,
3_9_5,
3_5_2,
3_5_1,
3_0_8,
3_0_7,
2_6_4,
2_6_3,
2_2_0,
2_1_9,
1_7_6,
1_3_2,
8_8,
4_4,
0,
]
lowerCAmelCase_: Optional[int] = [
9_9_9,
9_9_7,
9_9_5,
9_9_2,
9_9_0,
9_8_8,
9_8_6,
9_8_4,
9_8_1,
9_7_9,
9_7_7,
9_7_5,
9_7_2,
9_7_0,
9_6_8,
9_6_6,
9_6_4,
9_6_1,
9_5_9,
9_5_7,
9_5_6,
9_5_4,
9_5_1,
9_4_9,
9_4_6,
9_4_4,
9_4_1,
9_3_9,
9_3_6,
9_3_4,
9_3_1,
9_2_9,
9_2_6,
9_2_4,
9_2_1,
9_1_9,
9_1_6,
9_1_4,
9_1_3,
9_1_0,
9_0_7,
9_0_5,
9_0_2,
8_9_9,
8_9_6,
8_9_3,
8_9_1,
8_8_8,
8_8_5,
8_8_2,
8_7_9,
8_7_7,
8_7_4,
8_7_1,
8_7_0,
8_6_7,
8_6_4,
8_6_1,
8_5_8,
8_5_5,
8_5_2,
8_4_9,
8_4_6,
8_4_3,
8_4_0,
8_3_7,
8_3_4,
8_3_1,
8_2_8,
8_2_7,
8_2_4,
8_2_1,
8_1_7,
8_1_4,
8_1_1,
8_0_8,
8_0_4,
8_0_1,
7_9_8,
7_9_5,
7_9_1,
7_8_8,
7_8_5,
7_8_4,
7_8_0,
7_7_7,
7_7_4,
7_7_0,
7_6_6,
7_6_3,
7_6_0,
7_5_6,
7_5_2,
7_4_9,
7_4_6,
7_4_2,
7_4_1,
7_3_7,
7_3_3,
7_3_0,
7_2_6,
7_2_2,
7_1_8,
7_1_4,
7_1_0,
7_0_7,
7_0_3,
6_9_9,
6_9_8,
6_9_4,
6_9_0,
6_8_5,
6_8_1,
6_7_7,
6_7_3,
6_6_9,
6_6_4,
6_6_0,
6_5_6,
6_5_5,
6_5_0,
6_4_6,
6_4_1,
6_3_6,
6_3_2,
6_2_7,
6_2_2,
6_1_8,
6_1_3,
6_1_2,
6_0_7,
6_0_2,
5_9_6,
5_9_1,
5_8_6,
5_8_0,
5_7_5,
5_7_0,
5_6_9,
5_6_3,
5_5_7,
5_5_1,
5_4_5,
5_3_9,
5_3_3,
5_2_7,
5_2_6,
5_1_9,
5_1_2,
5_0_5,
4_9_8,
4_9_1,
4_8_4,
4_8_3,
4_7_4,
4_6_6,
4_5_7,
4_4_9,
4_4_0,
4_3_9,
4_2_8,
4_1_8,
4_0_7,
3_9_6,
3_9_5,
3_8_1,
3_6_6,
3_5_2,
3_5_1,
3_3_0,
3_0_8,
3_0_7,
2_8_6,
2_6_4,
2_6_3,
2_4_2,
2_2_0,
2_1_9,
1_7_6,
1_7_5,
1_3_2,
1_3_1,
8_8,
4_4,
0,
]
lowerCAmelCase_: Tuple = [
9_9_9,
9_9_1,
9_8_2,
9_7_4,
9_6_6,
9_5_8,
9_5_0,
9_4_1,
9_3_3,
9_2_5,
9_1_6,
9_0_8,
9_0_0,
8_9_9,
8_7_4,
8_5_0,
8_2_5,
8_0_0,
7_9_9,
7_0_0,
6_0_0,
5_0_0,
4_0_0,
3_0_0,
2_0_0,
1_0_0,
0,
]
lowerCAmelCase_: str = [
9_9_9,
9_9_2,
9_8_5,
9_7_8,
9_7_1,
9_6_4,
9_5_7,
9_4_9,
9_4_2,
9_3_5,
9_2_8,
9_2_1,
9_1_4,
9_0_7,
9_0_0,
8_9_9,
8_7_9,
8_5_9,
8_4_0,
8_2_0,
8_0_0,
7_9_9,
7_6_6,
7_3_3,
7_0_0,
6_9_9,
6_5_0,
6_0_0,
5_9_9,
5_0_0,
4_9_9,
4_0_0,
3_9_9,
3_0_0,
2_9_9,
2_0_0,
1_9_9,
1_0_0,
9_9,
0,
]
lowerCAmelCase_: int = [
9_9_9,
9_9_6,
9_9_2,
9_8_9,
9_8_5,
9_8_2,
9_7_9,
9_7_5,
9_7_2,
9_6_8,
9_6_5,
9_6_1,
9_5_8,
9_5_5,
9_5_1,
9_4_8,
9_4_4,
9_4_1,
9_3_8,
9_3_4,
9_3_1,
9_2_7,
9_2_4,
9_2_0,
9_1_7,
9_1_4,
9_1_0,
9_0_7,
9_0_3,
9_0_0,
8_9_9,
8_9_1,
8_8_4,
8_7_6,
8_6_9,
8_6_1,
8_5_3,
8_4_6,
8_3_8,
8_3_0,
8_2_3,
8_1_5,
8_0_8,
8_0_0,
7_9_9,
7_8_8,
7_7_7,
7_6_6,
7_5_5,
7_4_4,
7_3_3,
7_2_2,
7_1_1,
7_0_0,
6_9_9,
6_8_8,
6_7_7,
6_6_6,
6_5_5,
6_4_4,
6_3_3,
6_2_2,
6_1_1,
6_0_0,
5_9_9,
5_8_5,
5_7_1,
5_5_7,
5_4_2,
5_2_8,
5_1_4,
5_0_0,
4_9_9,
4_8_5,
4_7_1,
4_5_7,
4_4_2,
4_2_8,
4_1_4,
4_0_0,
3_9_9,
3_7_9,
3_5_9,
3_4_0,
3_2_0,
3_0_0,
2_9_9,
2_7_9,
2_5_9,
2_4_0,
2_2_0,
2_0_0,
1_9_9,
1_6_6,
1_3_3,
1_0_0,
9_9,
6_6,
3_3,
0,
]
| 668 | 0 |
"""simple docstring"""
def __a ( A = 10 , A = 10_00 , A = True ):
assert (
isinstance(A , A )
and isinstance(A , A )
and isinstance(A , A )
), "Invalid type of value(s) specified to function!"
if min_val > max_val:
raise ValueError("Invalid value for min_val or max_val (min_value < max_value)" )
return min_val if option else max_val
def __a ( A , A ):
return int((number_a + number_a) / 2 )
def __a ( A , A , A ):
assert (
isinstance(A , A ) and isinstance(A , A ) and isinstance(A , A )
), 'argument values must be type of "int"'
if lower > higher:
raise ValueError("argument value for lower and higher must be(lower > higher)" )
if not lower < to_guess < higher:
raise ValueError(
"guess value must be within the range of lower and higher value" )
def answer(A ) -> str:
if number > to_guess:
return "high"
elif number < to_guess:
return "low"
else:
return "same"
print("started..." )
lowercase__ = lower
lowercase__ = higher
lowercase__ = []
while True:
lowercase__ = get_avg(A , A )
last_numbers.append(A )
if answer(A ) == "low":
lowercase__ = number
elif answer(A ) == "high":
lowercase__ = number
else:
break
print(f'''guess the number : {last_numbers[-1]}''' )
print(f'''details : {last_numbers!s}''' )
def __a ( ):
lowercase__ = int(input("Enter lower value : " ).strip() )
lowercase__ = int(input("Enter high value : " ).strip() )
lowercase__ = int(input("Enter value to guess : " ).strip() )
guess_the_number(A , A , A )
if __name__ == "__main__":
main()
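# The loop above is plain bisection on integers, so it needs at most about
# log2(higher - lower) midpoint probes before the guesses pin down the number:
import math

def max_probes(lower: int, higher: int) -> int:
    return math.ceil(math.log2(higher - lower))

# max_probes(0, 1_000_000) -> 20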
| 719 |
"""simple docstring"""
from __future__ import annotations
def __a ( A , A ):
'''simple docstring'''
if partitions <= 0:
raise ValueError("partitions must be a positive number!" )
if partitions > number_of_bytes:
raise ValueError("partitions can not > number_of_bytes!" )
lowercase__ = number_of_bytes // partitions
lowercase__ = []
for i in range(A ):
lowercase__ = i * bytes_per_partition + 1
lowercase__ = (
number_of_bytes if i == partitions - 1 else (i + 1) * bytes_per_partition
)
allocation_list.append(f'''{start_bytes}-{end_bytes}''' )
return allocation_list
if __name__ == "__main__":
import doctest
doctest.testmod()
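# Worked example for the byte-partitioning helper above (hypothetical call;
# the obfuscated name __a stands in for an "allocation_num"-style function):
#   __a(16, 4) -> ['1-4', '5-8', '9-12', '13-16']
# The final partition absorbs any remainder, e.g. __a(10, 3) -> ['1-3', '4-6', '7-10'].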
| 668 | 0 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase_: Tuple = logging.get_logger(__name__)
lowerCAmelCase_: str = {
"transfo-xl-wt103": "https://huggingface.co/transfo-xl-wt103/resolve/main/config.json",
}
class a__ ( _a ):
snake_case_ = "transfo-xl"
snake_case_ = ["mems"]
snake_case_ = {
"n_token": "vocab_size",
"hidden_size": "d_model",
"num_attention_heads": "n_head",
"num_hidden_layers": "n_layer",
}
def __init__( self, _UpperCAmelCase=26_7735, _UpperCAmelCase=[2_0000, 4_0000, 20_0000], _UpperCAmelCase=1024, _UpperCAmelCase=1024, _UpperCAmelCase=16, _UpperCAmelCase=64, _UpperCAmelCase=4096, _UpperCAmelCase=4, _UpperCAmelCase=False, _UpperCAmelCase=18, _UpperCAmelCase=1600, _UpperCAmelCase=1000, _UpperCAmelCase=True, _UpperCAmelCase=True, _UpperCAmelCase=0, _UpperCAmelCase=-1, _UpperCAmelCase=True, _UpperCAmelCase=0.1, _UpperCAmelCase=0.0, _UpperCAmelCase=True, _UpperCAmelCase="normal", _UpperCAmelCase=0.01, _UpperCAmelCase=0.01, _UpperCAmelCase=0.02, _UpperCAmelCase=1E-5, _UpperCAmelCase=0, **_UpperCAmelCase, ):
'''simple docstring'''
lowercase__ = vocab_size
lowercase__ = []
self.cutoffs.extend(_UpperCAmelCase )
if proj_share_all_but_first:
lowercase__ = [False] + [True] * len(self.cutoffs )
else:
lowercase__ = [False] + [False] * len(self.cutoffs )
lowercase__ = d_model
lowercase__ = d_embed
lowercase__ = d_head
lowercase__ = d_inner
lowercase__ = div_val
lowercase__ = pre_lnorm
lowercase__ = n_layer
lowercase__ = n_head
lowercase__ = mem_len
lowercase__ = same_length
lowercase__ = attn_type
lowercase__ = clamp_len
lowercase__ = sample_softmax
lowercase__ = adaptive
lowercase__ = dropout
lowercase__ = dropatt
lowercase__ = untie_r
lowercase__ = init
lowercase__ = init_range
lowercase__ = proj_init_std
lowercase__ = init_std
lowercase__ = layer_norm_epsilon
super().__init__(eos_token_id=_UpperCAmelCase, **_UpperCAmelCase )
@property
def snake_case__ ( self ):
'''simple docstring'''
logger.info(F'''The model {self.model_type} is one of the few models that has no sequence length limit.''' )
return -1
    @snake_case__.setter
def snake_case__ ( self, _UpperCAmelCase ):
'''simple docstring'''
raise NotImplementedError(
F'''The model {self.model_type} is one of the few models that has no sequence length limit.''' )
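# Minimal usage sketch (hedged: assumes this class mirrors transformers'
# TransfoXLConfig; the obfuscated name a__ stands in for it):
#   config = a__(d_model=512, n_layer=6, n_head=8)
#   config.cutoffs   # adaptive-softmax cutoffs, default [20000, 40000, 200000]
# The length property above always reports -1: Transformer-XL's recurrence
# imposes no hard sequence length limit, so the setter deliberately raises.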
| 720 |
"""simple docstring"""
from collections import deque
class a__ :
def __init__( self, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase ):
'''simple docstring'''
lowercase__ = process_name # process name
lowercase__ = arrival_time # arrival time of the process
# completion time of finished process or last interrupted time
lowercase__ = arrival_time
lowercase__ = burst_time # remaining burst time
lowercase__ = 0 # total time of the process wait in ready queue
lowercase__ = 0 # time from arrival time to completion time
class a__ :
def __init__( self, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, ):
'''simple docstring'''
lowercase__ = number_of_queues
# time slice of queues that round robin algorithm applied
lowercase__ = time_slices
# unfinished process is in this ready_queue
lowercase__ = queue
# current time
lowercase__ = current_time
# finished process is in this sequence queue
lowercase__ = deque()
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = []
for i in range(len(self.finish_queue ) ):
sequence.append(self.finish_queue[i].process_name )
return sequence
def snake_case__ ( self, _UpperCAmelCase ):
'''simple docstring'''
lowercase__ = []
for i in range(len(_UpperCAmelCase ) ):
waiting_times.append(queue[i].waiting_time )
return waiting_times
def snake_case__ ( self, _UpperCAmelCase ):
'''simple docstring'''
lowercase__ = []
for i in range(len(_UpperCAmelCase ) ):
turnaround_times.append(queue[i].turnaround_time )
return turnaround_times
def snake_case__ ( self, _UpperCAmelCase ):
'''simple docstring'''
lowercase__ = []
for i in range(len(_UpperCAmelCase ) ):
completion_times.append(queue[i].stop_time )
return completion_times
def snake_case__ ( self, _UpperCAmelCase ):
'''simple docstring'''
return [q.burst_time for q in queue]
def snake_case__ ( self, _UpperCAmelCase ):
'''simple docstring'''
process.waiting_time += self.current_time - process.stop_time
return process.waiting_time
def snake_case__ ( self, _UpperCAmelCase ):
'''simple docstring'''
lowercase__ = deque() # sequence deque of finished process
while len(_UpperCAmelCase ) != 0:
lowercase__ = ready_queue.popleft() # current process
# if process's arrival time is later than current time, update current time
if self.current_time < cp.arrival_time:
                self.current_time = cp.arrival_time  # jump forward to the arrival time
# update waiting time of current process
self.update_waiting_time(_UpperCAmelCase )
# update current time
self.current_time += cp.burst_time
# finish the process and set the process's burst-time 0
lowercase__ = 0
# set the process's turnaround time because it is finished
lowercase__ = self.current_time - cp.arrival_time
# set the completion time
lowercase__ = self.current_time
# add the process to queue that has finished queue
finished.append(_UpperCAmelCase )
self.finish_queue.extend(_UpperCAmelCase ) # add finished process to finish queue
# FCFS will finish all remaining processes
return finished
def snake_case__ ( self, _UpperCAmelCase, _UpperCAmelCase ):
'''simple docstring'''
lowercase__ = deque() # sequence deque of terminated process
# just for 1 cycle and unfinished processes will go back to queue
for _ in range(len(_UpperCAmelCase ) ):
lowercase__ = ready_queue.popleft() # current process
# if process's arrival time is later than current time, update current time
if self.current_time < cp.arrival_time:
                self.current_time = cp.arrival_time  # jump forward to the arrival time
# update waiting time of unfinished processes
self.update_waiting_time(_UpperCAmelCase )
# if the burst time of process is bigger than time-slice
if cp.burst_time > time_slice:
# use CPU for only time-slice
self.current_time += time_slice
# update remaining burst time
cp.burst_time -= time_slice
# update end point time
lowercase__ = self.current_time
# locate the process behind the queue because it is not finished
ready_queue.append(_UpperCAmelCase )
else:
# use CPU for remaining burst time
self.current_time += cp.burst_time
# set burst time 0 because the process is finished
lowercase__ = 0
# set the finish time
lowercase__ = self.current_time
# update the process' turnaround time because it is finished
lowercase__ = self.current_time - cp.arrival_time
# add the process to queue that has finished queue
finished.append(_UpperCAmelCase )
self.finish_queue.extend(_UpperCAmelCase ) # add finished process to finish queue
# return finished processes queue and remaining processes queue
return finished, ready_queue
def snake_case__ ( self ):
'''simple docstring'''
for i in range(self.number_of_queues - 1 ):
lowercase__ , lowercase__ = self.round_robin(
self.ready_queue, self.time_slices[i] )
# the last queue has first_come_first_served algorithm
self.first_come_first_served(self.ready_queue )
return self.finish_queue
if __name__ == "__main__":
import doctest
lowerCAmelCase_: Optional[int] = Process("P1", 0, 5_3)
lowerCAmelCase_: Union[str, Any] = Process("P2", 0, 1_7)
lowerCAmelCase_: str = Process("P3", 0, 6_8)
lowerCAmelCase_: int = Process("P4", 0, 2_4)
lowerCAmelCase_: Dict = 3
lowerCAmelCase_: Any = [1_7, 2_5]
lowerCAmelCase_: Tuple = deque([Pa, Pa, Pa, Pa])
if len(time_slices) != number_of_queues - 1:
raise SystemExit(0)
doctest.testmod(extraglobs={"queue": deque([Pa, Pa, Pa, Pa])})
lowerCAmelCase_: Any = Process("P1", 0, 5_3)
lowerCAmelCase_: Tuple = Process("P2", 0, 1_7)
lowerCAmelCase_: Optional[int] = Process("P3", 0, 6_8)
lowerCAmelCase_: List[Any] = Process("P4", 0, 2_4)
lowerCAmelCase_: Union[str, Any] = 3
lowerCAmelCase_: Any = [1_7, 2_5]
lowerCAmelCase_: Optional[Any] = deque([Pa, Pa, Pa, Pa])
lowerCAmelCase_: Union[str, Any] = MLFQ(number_of_queues, time_slices, queue, 0)
lowerCAmelCase_: Tuple = mlfq.multi_level_feedback_queue()
# print total waiting times of processes(P1, P2, P3, P4)
print(
F'waiting time:\
\t\t\t{MLFQ.calculate_waiting_time(mlfq, [Pa, Pa, Pa, Pa])}'
)
# print completion times of processes(P1, P2, P3, P4)
print(
F'completion time:\
\t\t{MLFQ.calculate_completion_time(mlfq, [Pa, Pa, Pa, Pa])}'
)
# print total turnaround times of processes(P1, P2, P3, P4)
print(
F'turnaround time:\
\t\t{MLFQ.calculate_turnaround_time(mlfq, [Pa, Pa, Pa, Pa])}'
)
# print sequence of finished processes
print(
F'sequence of finished processes:\
{mlfq.calculate_sequence_of_finish_queue()}'
)
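# Expected behaviour of the MLFQ above (hand-traced, assuming bursts 53/17/68/24
# and time slices [17, 25]):
#   round robin, slice 17: P2 finishes at t=34; P1/P3/P4 keep 36/51/7
#   round robin, slice 25: P4 finishes at t=125; P1/P3 keep 11/26
#   FCFS tail queue:       P1 finishes at t=136, P3 at t=162
# so the finish sequence printed last is ['P2', 'P4', 'P1', 'P3'].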
| 668 | 0 |
"""simple docstring"""
from __future__ import annotations
def __a ( A ):
'''simple docstring'''
lowercase__ = str(A )
return len(A ) == 9 and set(A ) == set("123456789" )
def __a ( ):
'''simple docstring'''
for base_num in range(99_99 , 49_99 , -1 ):
lowercase__ = 10_00_02 * base_num
if is_9_pandigital(A ):
return candidate
for base_num in range(3_33 , 99 , -1 ):
lowercase__ = 1_00_20_03 * base_num
if is_9_pandigital(A ):
return candidate
return None
if __name__ == "__main__":
print(F'{solution() = }')
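# Worked check for the search above: scanning 4-digit bases downward, the
# first hit is base_num = 9327, since 9327 * 100002 = 932718654 -- the
# concatenation of 9327 and 2 * 9327 = 18654, which uses each digit 1-9
# exactly once (the classic Project Euler 38 answer).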
| 721 |
"""simple docstring"""
import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple
from transformers import AddedToken, BatchEncoding, ByTaTokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
if is_torch_available():
lowerCAmelCase_: Dict = "pt"
elif is_tf_available():
lowerCAmelCase_: Dict = "tf"
else:
lowerCAmelCase_: str = "jax"
class a__ ( _a , unittest.TestCase ):
snake_case_ = ByTaTokenizer
snake_case_ = False
def snake_case__ ( self ):
'''simple docstring'''
super().setUp()
lowercase__ = ByTaTokenizer()
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def snake_case__ ( self ):
'''simple docstring'''
return ByTaTokenizer.from_pretrained("google/byt5-small" )
def snake_case__ ( self, **_UpperCAmelCase ):
'''simple docstring'''
return self.tokenizer_class.from_pretrained(self.tmpdirname, **_UpperCAmelCase )
def snake_case__ ( self, _UpperCAmelCase, _UpperCAmelCase=False, _UpperCAmelCase=20, _UpperCAmelCase=5 ):
'''simple docstring'''
lowercase__ = []
for i in range(len(_UpperCAmelCase ) ):
try:
lowercase__ = tokenizer.decode([i], clean_up_tokenization_spaces=_UpperCAmelCase )
except UnicodeDecodeError:
pass
toks.append((i, tok) )
lowercase__ = list(filter(lambda _UpperCAmelCase : re.match(R"^[ a-zA-Z]+$", t[1] ), _UpperCAmelCase ) )
lowercase__ = list(filter(lambda _UpperCAmelCase : [t[0]] == tokenizer.encode(t[1], add_special_tokens=_UpperCAmelCase ), _UpperCAmelCase ) )
if max_length is not None and len(_UpperCAmelCase ) > max_length:
lowercase__ = toks[:max_length]
if min_length is not None and len(_UpperCAmelCase ) < min_length and len(_UpperCAmelCase ) > 0:
while len(_UpperCAmelCase ) < min_length:
lowercase__ = toks + toks
# toks_str = [t[1] for t in toks]
lowercase__ = [t[0] for t in toks]
# Ensure consistency
lowercase__ = tokenizer.decode(_UpperCAmelCase, clean_up_tokenization_spaces=_UpperCAmelCase )
if " " not in output_txt and len(_UpperCAmelCase ) > 1:
lowercase__ = (
tokenizer.decode([toks_ids[0]], clean_up_tokenization_spaces=_UpperCAmelCase )
+ " "
+ tokenizer.decode(toks_ids[1:], clean_up_tokenization_spaces=_UpperCAmelCase )
)
if with_prefix_space:
lowercase__ = " " + output_txt
lowercase__ = tokenizer.encode(_UpperCAmelCase, add_special_tokens=_UpperCAmelCase )
return output_txt, output_ids
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = self.ta_base_tokenizer
lowercase__ = tokenizer(["hi</s>", "I went to the gym</s>", "</s>"] )
lowercase__ = tokenizer(["hi", "I went to the gym", ""] )
self.assertListEqual(batch_with_eos_added["input_ids"], batch_without_eos_added["input_ids"] )
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = self.ta_base_tokenizer
lowercase__ = "Unicode €."
lowercase__ = tokenizer(_UpperCAmelCase )
lowercase__ = [88, 113, 108, 102, 114, 103, 104, 35, 229, 133, 175, 49, 1]
self.assertEqual(encoded["input_ids"], _UpperCAmelCase )
# decoding
lowercase__ = tokenizer.decode(_UpperCAmelCase )
self.assertEqual(_UpperCAmelCase, "Unicode €.</s>" )
lowercase__ = tokenizer("e è é ê ë" )
lowercase__ = [104, 35, 198, 171, 35, 198, 172, 35, 198, 173, 35, 198, 174, 1]
self.assertEqual(encoded["input_ids"], _UpperCAmelCase )
# decoding
lowercase__ = tokenizer.decode(_UpperCAmelCase )
self.assertEqual(_UpperCAmelCase, "e è é ê ë</s>" )
# encode/decode, but with `encode` instead of `__call__`
self.assertEqual(tokenizer.decode(tokenizer.encode("e è é ê ë" ) ), "e è é ê ë</s>" )
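    # Note on the expected ids above: ByT5 is byte-level, so each token id is
    # the UTF-8 byte value plus an offset of 3 (ids 0-2 are reserved for the
    # pad/eos/unk special tokens). E.g. "U" is byte 85 -> id 88, and the euro
    # sign's three UTF-8 bytes 226/130/172 -> ids 229/133/175.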
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = self.ta_base_tokenizer
lowercase__ = ["A long paragraph for summarization.", "Another paragraph for summarization."]
# fmt: off
lowercase__ = [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 1, 0]
# fmt: on
lowercase__ = tokenizer(_UpperCAmelCase, padding=_UpperCAmelCase, return_tensors=_UpperCAmelCase )
self.assertIsInstance(_UpperCAmelCase, _UpperCAmelCase )
if FRAMEWORK != "jax":
lowercase__ = list(batch.input_ids.numpy()[0] )
else:
lowercase__ = list(batch.input_ids.tolist()[0] )
self.assertListEqual(_UpperCAmelCase, _UpperCAmelCase )
self.assertEqual((2, 37), batch.input_ids.shape )
self.assertEqual((2, 37), batch.attention_mask.shape )
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = self.ta_base_tokenizer
lowercase__ = ["A long paragraph for summarization.", "Another paragraph for summarization."]
lowercase__ = tokenizer(_UpperCAmelCase, padding=_UpperCAmelCase, return_tensors=_UpperCAmelCase )
# check if input_ids are returned and no decoder_input_ids
self.assertIn("input_ids", _UpperCAmelCase )
self.assertIn("attention_mask", _UpperCAmelCase )
self.assertNotIn("decoder_input_ids", _UpperCAmelCase )
self.assertNotIn("decoder_attention_mask", _UpperCAmelCase )
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = self.ta_base_tokenizer
lowercase__ = [
"Summary of the text.",
"Another summary.",
]
lowercase__ = tokenizer(
text_target=_UpperCAmelCase, max_length=32, padding="max_length", truncation=_UpperCAmelCase, return_tensors=_UpperCAmelCase )
self.assertEqual(32, targets["input_ids"].shape[1] )
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = self.ta_base_tokenizer
lowercase__ = ["A long paragraph for summarization. </s>"]
lowercase__ = ["Summary of the text. </s>"]
# fmt: off
lowercase__ = [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 35, 1]
lowercase__ = [86, 120, 112, 112, 100, 117, 124, 35, 114, 105, 35, 119, 107, 104, 35, 119, 104, 123, 119, 49, 35, 1]
# fmt: on
lowercase__ = tokenizer(_UpperCAmelCase, text_target=_UpperCAmelCase )
self.assertEqual(_UpperCAmelCase, batch["input_ids"][0] )
self.assertEqual(_UpperCAmelCase, batch["labels"][0] )
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
self.assertNotEqual(tokenizer.model_max_length, 42 )
# Now let's start the test
lowercase__ = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
# Isolate this from the other tests because we save additional tokens/etc
lowercase__ = tempfile.mkdtemp()
lowercase__ = " He is very happy, UNwant\u00E9d,running"
lowercase__ = tokenizer.encode(_UpperCAmelCase, add_special_tokens=_UpperCAmelCase )
tokenizer.save_pretrained(_UpperCAmelCase )
lowercase__ = tokenizer.__class__.from_pretrained(_UpperCAmelCase )
lowercase__ = after_tokenizer.encode(_UpperCAmelCase, add_special_tokens=_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase, _UpperCAmelCase )
shutil.rmtree(_UpperCAmelCase )
lowercase__ = self.get_tokenizers(model_max_length=42 )
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
# Isolate this from the other tests because we save additional tokens/etc
lowercase__ = tempfile.mkdtemp()
lowercase__ = " He is very happy, UNwant\u00E9d,running"
tokenizer.add_tokens(["bim", "bambam"] )
lowercase__ = tokenizer.additional_special_tokens
additional_special_tokens.append("new_additional_special_token" )
tokenizer.add_special_tokens({"additional_special_tokens": additional_special_tokens} )
lowercase__ = tokenizer.encode(_UpperCAmelCase, add_special_tokens=_UpperCAmelCase )
tokenizer.save_pretrained(_UpperCAmelCase )
lowercase__ = tokenizer.__class__.from_pretrained(_UpperCAmelCase )
lowercase__ = after_tokenizer.encode(_UpperCAmelCase, add_special_tokens=_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase, _UpperCAmelCase )
self.assertIn("new_additional_special_token", after_tokenizer.additional_special_tokens )
self.assertEqual(after_tokenizer.model_max_length, 42 )
lowercase__ = tokenizer.__class__.from_pretrained(_UpperCAmelCase, model_max_length=43 )
self.assertEqual(tokenizer.model_max_length, 43 )
shutil.rmtree(_UpperCAmelCase )
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(_UpperCAmelCase )
with open(os.path.join(_UpperCAmelCase, "special_tokens_map.json" ), encoding="utf-8" ) as json_file:
lowercase__ = json.load(_UpperCAmelCase )
with open(os.path.join(_UpperCAmelCase, "tokenizer_config.json" ), encoding="utf-8" ) as json_file:
lowercase__ = json.load(_UpperCAmelCase )
lowercase__ = [F'''<extra_id_{i}>''' for i in range(125 )]
lowercase__ = added_tokens_extra_ids + [
"an_additional_special_token"
]
lowercase__ = added_tokens_extra_ids + [
"an_additional_special_token"
]
with open(os.path.join(_UpperCAmelCase, "special_tokens_map.json" ), "w", encoding="utf-8" ) as outfile:
json.dump(_UpperCAmelCase, _UpperCAmelCase )
with open(os.path.join(_UpperCAmelCase, "tokenizer_config.json" ), "w", encoding="utf-8" ) as outfile:
json.dump(_UpperCAmelCase, _UpperCAmelCase )
# the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
# into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
# "special_tokens_map.json" files
lowercase__ = tokenizer_class.from_pretrained(
_UpperCAmelCase, )
self.assertIn(
"an_additional_special_token", tokenizer_without_change_in_init.additional_special_tokens )
# self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab
self.assertEqual(
["an_additional_special_token"], tokenizer_without_change_in_init.convert_ids_to_tokens(
tokenizer_without_change_in_init.convert_tokens_to_ids(["an_additional_special_token"] ) ), )
# Now we test that we can change the value of additional_special_tokens in the from_pretrained
lowercase__ = added_tokens_extra_ids + [AddedToken("a_new_additional_special_token", lstrip=_UpperCAmelCase )]
lowercase__ = tokenizer_class.from_pretrained(
_UpperCAmelCase, additional_special_tokens=_UpperCAmelCase, )
self.assertIn("a_new_additional_special_token", tokenizer.additional_special_tokens )
self.assertEqual(
["a_new_additional_special_token"], tokenizer.convert_ids_to_tokens(
tokenizer.convert_tokens_to_ids(["a_new_additional_special_token"] ) ), )
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(_UpperCAmelCase )
lowercase__ = tokenizer_class.from_pretrained(_UpperCAmelCase )
self.assertTrue(tokenizer.decode([255] ) == "" )
def snake_case__ ( self ):
'''simple docstring'''
pass
def snake_case__ ( self ):
'''simple docstring'''
pass
def snake_case__ ( self ):
'''simple docstring'''
pass
def snake_case__ ( self ):
'''simple docstring'''
pass
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = self.get_tokenizers(fast=_UpperCAmelCase, do_lower_case=_UpperCAmelCase )
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
lowercase__ = ["t", "h", "i", "s", " ", "i", "s", " ", "a", " ", "t", "e", "x", "t", "</s>"]
lowercase__ = tokenizer.convert_tokens_to_string(_UpperCAmelCase )
self.assertIsInstance(_UpperCAmelCase, _UpperCAmelCase )
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
lowercase__ = [
"bos_token",
"eos_token",
"unk_token",
"sep_token",
"pad_token",
"cls_token",
"mask_token",
]
lowercase__ = 0
lowercase__ = tokenizer.convert_ids_to_tokens(
_UpperCAmelCase, skip_special_tokens=_UpperCAmelCase )
for attr in attributes_list:
setattr(_UpperCAmelCase, attr + "_id", _UpperCAmelCase )
self.assertEqual(getattr(_UpperCAmelCase, _UpperCAmelCase ), _UpperCAmelCase )
self.assertEqual(getattr(_UpperCAmelCase, attr + "_id" ), _UpperCAmelCase )
setattr(_UpperCAmelCase, attr + "_id", _UpperCAmelCase )
self.assertEqual(getattr(_UpperCAmelCase, _UpperCAmelCase ), _UpperCAmelCase )
self.assertEqual(getattr(_UpperCAmelCase, attr + "_id" ), _UpperCAmelCase )
setattr(_UpperCAmelCase, "additional_special_tokens_ids", [] )
self.assertListEqual(getattr(_UpperCAmelCase, "additional_special_tokens" ), [] )
self.assertListEqual(getattr(_UpperCAmelCase, "additional_special_tokens_ids" ), [] )
setattr(_UpperCAmelCase, "additional_special_tokens_ids", [token_id_to_test_setters] )
self.assertListEqual(getattr(_UpperCAmelCase, "additional_special_tokens" ), [token_to_test_setters] )
self.assertListEqual(getattr(_UpperCAmelCase, "additional_special_tokens_ids" ), [token_id_to_test_setters] )
| 668 | 0 |
from queue import Queue
from typing import TYPE_CHECKING, Optional
if TYPE_CHECKING:
from ..models.auto import AutoTokenizer
class a__ :
def snake_case__ ( self, _UpperCAmelCase ):
'''simple docstring'''
raise NotImplementedError()
def snake_case__ ( self ):
'''simple docstring'''
raise NotImplementedError()
class a__ ( _a ):
def __init__( self, _UpperCAmelCase, _UpperCAmelCase = False, **_UpperCAmelCase ):
'''simple docstring'''
lowercase__ = tokenizer
lowercase__ = skip_prompt
lowercase__ = decode_kwargs
# variables used in the streaming process
lowercase__ = []
lowercase__ = 0
lowercase__ = True
def snake_case__ ( self, _UpperCAmelCase ):
'''simple docstring'''
if len(value.shape ) > 1 and value.shape[0] > 1:
raise ValueError("TextStreamer only supports batch size 1" )
elif len(value.shape ) > 1:
lowercase__ = value[0]
if self.skip_prompt and self.next_tokens_are_prompt:
lowercase__ = False
return
# Add the new token to the cache and decodes the entire thing.
self.token_cache.extend(value.tolist() )
lowercase__ = self.tokenizer.decode(self.token_cache, **self.decode_kwargs )
# After the symbol for a new line, we flush the cache.
if text.endswith("\n" ):
lowercase__ = text[self.print_len :]
lowercase__ = []
lowercase__ = 0
# If the last token is a CJK character, we print the characters.
elif len(_UpperCAmelCase ) > 0 and self._is_chinese_char(ord(text[-1] ) ):
lowercase__ = text[self.print_len :]
self.print_len += len(_UpperCAmelCase )
# Otherwise, prints until the last space char (simple heuristic to avoid printing incomplete words,
# which may change with the subsequent token -- there are probably smarter ways to do this!)
else:
lowercase__ = text[self.print_len : text.rfind(" " ) + 1]
self.print_len += len(_UpperCAmelCase )
self.on_finalized_text(_UpperCAmelCase )
def snake_case__ ( self ):
'''simple docstring'''
if len(self.token_cache ) > 0:
lowercase__ = self.tokenizer.decode(self.token_cache, **self.decode_kwargs )
lowercase__ = text[self.print_len :]
lowercase__ = []
lowercase__ = 0
else:
lowercase__ = ""
lowercase__ = True
self.on_finalized_text(_UpperCAmelCase, stream_end=_UpperCAmelCase )
def snake_case__ ( self, _UpperCAmelCase, _UpperCAmelCase = False ):
'''simple docstring'''
print(_UpperCAmelCase, flush=_UpperCAmelCase, end="" if not stream_end else None )
def snake_case__ ( self, _UpperCAmelCase ):
'''simple docstring'''
if (
            (cp >= 0X4e00 and cp <= 0X9fff)  # CJK Unified Ideographs
            or (cp >= 0X3400 and cp <= 0X4dbf)  # CJK Unified Ideographs Extension A
            or (cp >= 0X2_0000 and cp <= 0X2_a6df)  # CJK Unified Ideographs Extension B
            or (cp >= 0X2_a700 and cp <= 0X2_b73f)  # CJK Unified Ideographs Extension C
            or (cp >= 0X2_b740 and cp <= 0X2_b81f)  # CJK Unified Ideographs Extension D
            or (cp >= 0X2_b820 and cp <= 0X2_ceaf)  # CJK Unified Ideographs Extension E
            or (cp >= 0Xf900 and cp <= 0Xfaff)  # CJK Compatibility Ideographs
            or (cp >= 0X2_f800 and cp <= 0X2_fa1f)  # CJK Compatibility Ideographs Supplement
        ):  # i.e. cp falls in one of the CJK ideograph blocks
return True
return False
class a__ ( _a ):
def __init__( self, _UpperCAmelCase, _UpperCAmelCase = False, _UpperCAmelCase = None, **_UpperCAmelCase ):
'''simple docstring'''
super().__init__(_UpperCAmelCase, _UpperCAmelCase, **_UpperCAmelCase )
lowercase__ = Queue()
lowercase__ = None
lowercase__ = timeout
def snake_case__ ( self, _UpperCAmelCase, _UpperCAmelCase = False ):
'''simple docstring'''
self.text_queue.put(_UpperCAmelCase, timeout=self.timeout )
if stream_end:
self.text_queue.put(self.stop_signal, timeout=self.timeout )
def __iter__( self ):
'''simple docstring'''
return self
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = self.text_queue.get(timeout=self.timeout )
if value == self.stop_signal:
raise StopIteration()
else:
return value
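# Usage sketch for the streamers above (hedged: assumes they match
# transformers' TextStreamer / TextIteratorStreamer API):
#   from transformers import AutoModelForCausalLM, AutoTokenizer
#   tok = AutoTokenizer.from_pretrained("gpt2")
#   model = AutoModelForCausalLM.from_pretrained("gpt2")
#   streamer = TextStreamer(tok, skip_prompt=True)   # prints tokens as generated
#   model.generate(**tok("Hello", return_tensors="pt"), streamer=streamer)
# The iterator variant instead feeds a Queue, so a consumer thread can do
# `for text in streamer: ...` while generate() runs elsewhere.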
| 700 |
"""simple docstring"""
import unittest
from huggingface_hub import hf_hub_download
from transformers import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING, VideoMAEFeatureExtractor
from transformers.pipelines import VideoClassificationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_decord,
require_tf,
require_torch,
require_torch_or_tf,
require_vision,
)
from .test_pipelines_common import ANY
@is_pipeline_test
@require_torch_or_tf
@require_vision
@require_decord
class a__ ( unittest.TestCase ):
snake_case_ = MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING
def snake_case__ ( self, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase ):
'''simple docstring'''
lowercase__ = hf_hub_download(
repo_id="nateraw/video-demo", filename="archery.mp4", repo_type="dataset" )
lowercase__ = VideoClassificationPipeline(model=_UpperCAmelCase, image_processor=_UpperCAmelCase, top_k=2 )
lowercase__ = [
example_video_filepath,
"https://huggingface.co/datasets/nateraw/video-demo/resolve/main/archery.mp4",
]
return video_classifier, examples
def snake_case__ ( self, _UpperCAmelCase, _UpperCAmelCase ):
'''simple docstring'''
for example in examples:
lowercase__ = video_classifier(_UpperCAmelCase )
self.assertEqual(
_UpperCAmelCase, [
{"score": ANY(_UpperCAmelCase ), "label": ANY(_UpperCAmelCase )},
{"score": ANY(_UpperCAmelCase ), "label": ANY(_UpperCAmelCase )},
], )
@require_torch
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = "hf-internal-testing/tiny-random-VideoMAEForVideoClassification"
lowercase__ = VideoMAEFeatureExtractor(
size={"shortest_edge": 10}, crop_size={"height": 10, "width": 10} )
lowercase__ = pipeline(
"video-classification", model=_UpperCAmelCase, feature_extractor=_UpperCAmelCase, frame_sampling_rate=4 )
lowercase__ = hf_hub_download(repo_id="nateraw/video-demo", filename="archery.mp4", repo_type="dataset" )
lowercase__ = video_classifier(_UpperCAmelCase, top_k=2 )
self.assertEqual(
nested_simplify(_UpperCAmelCase, decimals=4 ), [{"score": 0.5_199, "label": "LABEL_0"}, {"score": 0.4_801, "label": "LABEL_1"}], )
lowercase__ = video_classifier(
[
video_file_path,
video_file_path,
], top_k=2, )
self.assertEqual(
nested_simplify(_UpperCAmelCase, decimals=4 ), [
[{"score": 0.5_199, "label": "LABEL_0"}, {"score": 0.4_801, "label": "LABEL_1"}],
[{"score": 0.5_199, "label": "LABEL_0"}, {"score": 0.4_801, "label": "LABEL_1"}],
], )
@require_tf
def snake_case__ ( self ):
'''simple docstring'''
pass
| 668 | 0 |
"""simple docstring"""
import torch
from ..models.speechta import SpeechTaForTextToSpeech, SpeechTaHifiGan, SpeechTaProcessor
from ..utils import is_datasets_available
from .base import PipelineTool
if is_datasets_available():
from datasets import load_dataset
class a__ ( _a ):
snake_case_ = "microsoft/speecht5_tts"
snake_case_ = (
"This is a tool that reads an English text out loud. It takes an input named `text` which should contain the "
"text to read (in English) and returns a waveform object containing the sound."
)
snake_case_ = "text_reader"
snake_case_ = SpeechTaProcessor
snake_case_ = SpeechTaForTextToSpeech
snake_case_ = SpeechTaHifiGan
snake_case_ = ["text"]
snake_case_ = ["audio"]
def snake_case__ ( self ):
'''simple docstring'''
if self.post_processor is None:
lowercase__ = "microsoft/speecht5_hifigan"
super().setup()
def snake_case__ ( self, _UpperCAmelCase, _UpperCAmelCase=None ):
'''simple docstring'''
lowercase__ = self.pre_processor(text=_UpperCAmelCase, return_tensors="pt", truncation=_UpperCAmelCase )
if speaker_embeddings is None:
if not is_datasets_available():
raise ImportError("Datasets needs to be installed if not passing speaker embeddings." )
lowercase__ = load_dataset("Matthijs/cmu-arctic-xvectors", split="validation" )
lowercase__ = torch.tensor(embeddings_dataset[7305]["xvector"] ).unsqueeze(0 )
return {"input_ids": inputs["input_ids"], "speaker_embeddings": speaker_embeddings}
def snake_case__ ( self, _UpperCAmelCase ):
'''simple docstring'''
with torch.no_grad():
return self.model.generate_speech(**_UpperCAmelCase )
def snake_case__ ( self, _UpperCAmelCase ):
'''simple docstring'''
with torch.no_grad():
return self.post_processor(_UpperCAmelCase ).cpu().detach()
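# Hypothetical usage of the tool above (names assumed; in transformers this
# corresponds to the agents' TextToSpeechTool):
#   tool = a__()                      # loads microsoft/speecht5_tts plus the HiFi-GAN vocoder
#   waveform = tool("Hello world")    # returns a mono audio tensor
# Speaker identity comes from x-vector embeddings; when none are given, the
# tool pulls a default one from the Matthijs/cmu-arctic-xvectors dataset.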
| 701 |
"""simple docstring"""
import itertools
import math
def __a ( A ):
'''simple docstring'''
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
# All primes number are in format of 6k +/- 1
for i in range(5 , int(math.sqrt(A ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
def __a ( ):
'''simple docstring'''
lowercase__ = 2
while True:
if is_prime(A ):
yield num
num += 1
def __a ( A = 1_00_01 ):
'''simple docstring'''
return next(itertools.islice(prime_generator() , nth - 1 , A ) )
if __name__ == "__main__":
print(F'{solution() = }')
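# Sanity check: prime_generator() yields 2, 3, 5, 7, ... and
# islice(gen, nth - 1, nth) picks out the nth prime, so solution() returns
# 104743, the 10001st prime (Project Euler 7).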
| 668 | 0 |
"""simple docstring"""
import argparse
import shlex
import runhouse as rh
if __name__ == "__main__":
# Refer to https://runhouse-docs.readthedocs-hosted.com/en/latest/api/python/cluster.html#hardware-setup for cloud access
# setup instructions, if using on-demand hardware
# If user passes --user <user> --host <host> --key_path <key_path> <example> <args>, fill them in as BYO cluster
# If user passes --instance <instance> --provider <provider> <example> <args>, fill them in as on-demand cluster
# Throw an error if user passes both BYO and on-demand cluster args
# Otherwise, use default values
lowerCAmelCase_: int = argparse.ArgumentParser()
parser.add_argument("--user", type=str, default="ubuntu")
parser.add_argument("--host", type=str, default="localhost")
parser.add_argument("--key_path", type=str, default=None)
parser.add_argument("--instance", type=str, default="V100:1")
parser.add_argument("--provider", type=str, default="cheapest")
parser.add_argument("--use_spot", type=bool, default=False)
parser.add_argument("--example", type=str, default="pytorch/text-generation/run_generation.py")
lowerCAmelCase_: str = parser.parse_known_args()
if args.host != "localhost":
if args.instance != "V100:1" or args.provider != "cheapest":
raise ValueError("Cannot specify both BYO and on-demand cluster args")
lowerCAmelCase_: List[str] = rh.cluster(
name="rh-cluster", ips=[args.host], ssh_creds={"ssh_user": args.user, "ssh_private_key": args.key_path}
)
else:
lowerCAmelCase_: List[str] = rh.cluster(
name="rh-cluster", instance_type=args.instance, provider=args.provider, use_spot=args.use_spot
)
lowerCAmelCase_: Optional[int] = args.example.rsplit("/", 1)[0]
# Set up remote environment
cluster.install_packages(["pip:./"]) # Installs transformers from local source
# Note transformers is copied into the home directory on the remote machine, so we can install from there
cluster.run([F'pip install -r transformers/examples/{example_dir}/requirements.txt'])
cluster.run(["pip install torch --upgrade --extra-index-url https://download.pytorch.org/whl/cu117"])
# Run example. You can bypass the CLI wrapper and paste your own code here.
cluster.run([F'python transformers/examples/{args.example} {" ".join(shlex.quote(arg) for arg in unknown)}'])
# Alternatively, we can just import and run a training function (especially if there's no wrapper CLI):
# from my_script... import train
# reqs = ['pip:./', 'torch', 'datasets', 'accelerate', 'evaluate', 'tqdm', 'scipy', 'scikit-learn', 'tensorboard']
# launch_train_gpu = rh.function(fn=train,
# system=gpu,
# reqs=reqs,
# name='train_bert_glue')
#
# We can pass in arguments just like we would to a function:
# launch_train_gpu(num_epochs = 3, lr = 2e-5, seed = 42, batch_size = 16
# stream_logs=True)
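# Example invocation of this launcher (script path hypothetical):
#   python run_on_remote.py --instance A100:1 --provider cheapest \
#       --example pytorch/text-generation/run_generation.py --model_type gpt2
# Anything beyond the known flags lands in `unknown` above and is forwarded
# verbatim to the example script running on the cluster.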
| 702 |
"""simple docstring"""
from typing import Optional
from .. import Features, NamedSplit
from ..packaged_modules.text.text import Text
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
class a__ ( _a ):
def __init__( self, _UpperCAmelCase, _UpperCAmelCase = None, _UpperCAmelCase = None, _UpperCAmelCase = None, _UpperCAmelCase = False, _UpperCAmelCase = False, _UpperCAmelCase = None, **_UpperCAmelCase, ):
'''simple docstring'''
super().__init__(
_UpperCAmelCase, split=_UpperCAmelCase, features=_UpperCAmelCase, cache_dir=_UpperCAmelCase, keep_in_memory=_UpperCAmelCase, streaming=_UpperCAmelCase, num_proc=_UpperCAmelCase, **_UpperCAmelCase, )
lowercase__ = path_or_paths if isinstance(_UpperCAmelCase, _UpperCAmelCase ) else {self.split: path_or_paths}
lowercase__ = Text(
cache_dir=_UpperCAmelCase, data_files=_UpperCAmelCase, features=_UpperCAmelCase, **_UpperCAmelCase, )
def snake_case__ ( self ):
'''simple docstring'''
if self.streaming:
lowercase__ = self.builder.as_streaming_dataset(split=self.split )
# Build regular (map-style) dataset
else:
lowercase__ = None
lowercase__ = None
lowercase__ = None
lowercase__ = None
self.builder.download_and_prepare(
download_config=_UpperCAmelCase, download_mode=_UpperCAmelCase, verification_mode=_UpperCAmelCase, base_path=_UpperCAmelCase, num_proc=self.num_proc, )
lowercase__ = self.builder.as_dataset(
split=self.split, verification_mode=_UpperCAmelCase, in_memory=self.keep_in_memory )
return dataset
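# Usage sketch (hedged: this mirrors datasets' TextDatasetReader, which backs
# load_dataset("text", ...); the obfuscated a__ stands in for that class):
#   ds = a__("corpus.txt", split=NamedSplit("train")).read()   # one example per line
#   ds_stream = a__("corpus.txt", streaming=True).read()       # IterableDataset instead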
| 668 | 0 |
"""simple docstring"""
import argparse
import os
import numpy as np
import tensorflow as tf
import torch
from transformers import BertModel
def __a ( A , A , A ):
'''simple docstring'''
lowercase__ = ("dense.weight", "attention.self.query", "attention.self.key", "attention.self.value")
lowercase__ = (
("layer.", "layer_"),
("word_embeddings.weight", "word_embeddings"),
("position_embeddings.weight", "position_embeddings"),
("token_type_embeddings.weight", "token_type_embeddings"),
(".", "/"),
("LayerNorm/weight", "LayerNorm/gamma"),
("LayerNorm/bias", "LayerNorm/beta"),
("weight", "kernel"),
)
if not os.path.isdir(A ):
os.makedirs(A )
lowercase__ = model.state_dict()
def to_tf_var_name(A ):
for patt, repl in iter(A ):
lowercase__ = name.replace(A , A )
return f'''bert/{name}'''
def create_tf_var(A , A , A ):
lowercase__ = tf.dtypes.as_dtype(tensor.dtype )
lowercase__ = tf.get_variable(dtype=A , shape=tensor.shape , name=A , initializer=tf.zeros_initializer() )
session.run(tf.variables_initializer([tf_var] ) )
session.run(A )
return tf_var
tf.reset_default_graph()
with tf.Session() as session:
for var_name in state_dict:
lowercase__ = to_tf_var_name(A )
lowercase__ = state_dict[var_name].numpy()
if any(x in var_name for x in tensors_to_transpose ):
lowercase__ = torch_tensor.T
lowercase__ = create_tf_var(tensor=A , name=A , session=A )
tf.keras.backend.set_value(A , A )
lowercase__ = session.run(A )
print(f'''Successfully created {tf_name}: {np.allclose(A , A )}''' )
lowercase__ = tf.train.Saver(tf.trainable_variables() )
saver.save(A , os.path.join(A , model_name.replace("-" , "_" ) + ".ckpt" ) )
def __a ( A=None ):
'''simple docstring'''
lowercase__ = argparse.ArgumentParser()
parser.add_argument("--model_name" , type=A , required=A , help="model name e.g. bert-base-uncased" )
parser.add_argument(
"--cache_dir" , type=A , default=A , required=A , help="Directory containing pytorch model" )
parser.add_argument("--pytorch_model_path" , type=A , required=A , help="/path/to/<pytorch-model-name>.bin" )
parser.add_argument("--tf_cache_dir" , type=A , required=A , help="Directory in which to save tensorflow model" )
lowercase__ = parser.parse_args(A )
lowercase__ = BertModel.from_pretrained(
pretrained_model_name_or_path=args.model_name , state_dict=torch.load(args.pytorch_model_path ) , cache_dir=args.cache_dir , )
convert_pytorch_checkpoint_to_tf(model=A , ckpt_dir=args.tf_cache_dir , model_name=args.model_name )
if __name__ == "__main__":
main()
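# Example CLI invocation for the converter above (paths hypothetical):
#   python convert_bert_pytorch_checkpoint_to_original_tf.py \
#       --model_name bert-base-uncased \
#       --pytorch_model_path ./pytorch_model.bin \
#       --tf_cache_dir ./tf_ckpt
# This writes ./tf_ckpt/bert_base_uncased.ckpt, with the dense/attention
# matrices transposed back into TF's kernel layout.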
| 703 |
"""simple docstring"""
import argparse
import json
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
lowerCAmelCase_: List[str] = 1_6
lowerCAmelCase_: Optional[Any] = 3_2
def __a ( A , A = 16 , A = "bert-base-cased" ):
'''simple docstring'''
lowercase__ = AutoTokenizer.from_pretrained(A )
lowercase__ = load_dataset("glue" , "mrpc" )
def tokenize_function(A ):
# max_length=None => use the model max length (it's actually the default)
lowercase__ = tokenizer(examples["sentence1"] , examples["sentence2"] , truncation=A , max_length=A )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
lowercase__ = datasets.map(
A , batched=A , remove_columns=["idx", "sentence1", "sentence2"] , load_from_cache_file=A )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
lowercase__ = tokenized_datasets.rename_column("label" , "labels" )
def collate_fn(A ):
# On TPU it's best to pad everything to the same length or training will be very slow.
if accelerator.distributed_type == DistributedType.TPU:
return tokenizer.pad(A , padding="max_length" , max_length=1_28 , return_tensors="pt" )
return tokenizer.pad(A , padding="longest" , return_tensors="pt" )
# Instantiate dataloaders.
lowercase__ = DataLoader(
tokenized_datasets["train"] , shuffle=A , collate_fn=A , batch_size=A )
lowercase__ = DataLoader(
tokenized_datasets["validation"] , shuffle=A , collate_fn=A , batch_size=A )
return train_dataloader, eval_dataloader
def __a ( A , A ):
'''simple docstring'''
lowercase__ = Accelerator()
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
lowercase__ = config["lr"]
lowercase__ = int(config["num_epochs"] )
lowercase__ = int(config["seed"] )
lowercase__ = int(config["batch_size"] )
lowercase__ = args.model_name_or_path
set_seed(A )
lowercase__ , lowercase__ = get_dataloaders(A , A , A )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
lowercase__ = AutoModelForSequenceClassification.from_pretrained(A , return_dict=A )
# Instantiate optimizer
lowercase__ = (
AdamW
if accelerator.state.deepspeed_plugin is None
or "optimizer" not in accelerator.state.deepspeed_plugin.deepspeed_config
else DummyOptim
)
lowercase__ = optimizer_cls(params=model.parameters() , lr=A )
if accelerator.state.deepspeed_plugin is not None:
lowercase__ = accelerator.state.deepspeed_plugin.deepspeed_config[
"gradient_accumulation_steps"
]
else:
lowercase__ = 1
lowercase__ = (len(A ) * num_epochs) // gradient_accumulation_steps
# Instantiate scheduler
if (
accelerator.state.deepspeed_plugin is None
or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
):
lowercase__ = get_linear_schedule_with_warmup(
optimizer=A , num_warmup_steps=0 , num_training_steps=A , )
else:
lowercase__ = DummyScheduler(A , total_num_steps=A , warmup_num_steps=0 )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ = accelerator.prepare(
A , A , A , A , A )
# We need to keep track of how many total steps we have iterated over
lowercase__ = 0
# We also need to keep track of the stating epoch so files are named properly
lowercase__ = 0
# Now we train the model
lowercase__ = evaluate.load("glue" , "mrpc" )
lowercase__ = 0
lowercase__ = {}
for epoch in range(A , A ):
model.train()
for step, batch in enumerate(A ):
lowercase__ = model(**A )
lowercase__ = outputs.loss
lowercase__ = loss / gradient_accumulation_steps
accelerator.backward(A )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
overall_step += 1
model.eval()
lowercase__ = 0
for step, batch in enumerate(A ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
lowercase__ = model(**A )
lowercase__ = outputs.logits.argmax(dim=-1 )
# It is slightly faster to call this once, than multiple times
lowercase__ , lowercase__ = accelerator.gather(
(predictions, batch["labels"]) ) # If we are in a multiprocess environment, the last batch has duplicates
if accelerator.use_distributed:
if step == len(A ) - 1:
lowercase__ = predictions[: len(eval_dataloader.dataset ) - samples_seen]
lowercase__ = references[: len(eval_dataloader.dataset ) - samples_seen]
else:
samples_seen += references.shape[0]
metric.add_batch(
predictions=A , references=A , )
lowercase__ = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(f'''epoch {epoch}:''' , A )
lowercase__ = eval_metric["accuracy"]
if best_performance < eval_metric["accuracy"]:
lowercase__ = eval_metric["accuracy"]
if args.performance_lower_bound is not None:
assert (
args.performance_lower_bound <= best_performance
), f'''Best performance metric {best_performance} is lower than the lower bound {args.performance_lower_bound}'''
accelerator.wait_for_everyone()
if accelerator.is_main_process:
with open(os.path.join(args.output_dir , "all_results.json" ) , "w" ) as f:
json.dump(A , A )
def __a ( ):
'''simple docstring'''
lowercase__ = argparse.ArgumentParser(description="Simple example of training script tracking peak GPU memory usage." )
parser.add_argument(
"--model_name_or_path" , type=A , default="bert-base-cased" , help="Path to pretrained model or model identifier from huggingface.co/models." , required=A , )
parser.add_argument(
"--output_dir" , type=A , default="." , help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory." , )
parser.add_argument(
"--performance_lower_bound" , type=A , default=A , help="Optional lower bound for the performance metric. If set, the training will throw error when the performance metric drops below this value." , )
parser.add_argument(
"--num_epochs" , type=A , default=3 , help="Number of train epochs." , )
lowercase__ = parser.parse_args()
lowercase__ = {"lr": 2e-5, "num_epochs": args.num_epochs, "seed": 42, "batch_size": 16}
training_function(A , A )
if __name__ == "__main__":
main()
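# Example launch for the training script above (flags grounded in the argparse
# it defines; the DeepSpeed config path is hypothetical):
#   accelerate launch --config_file ds_config.yaml this_script.py \
#       --model_name_or_path bert-base-cased --num_epochs 3 --output_dir ./out
# When a DeepSpeed plugin supplies its own optimizer/scheduler, the dummy
# classes above defer those choices to the DeepSpeed config instead of
# AdamW / linear warmup.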
| 668 | 0 |
"""simple docstring"""
from math import isqrt
def __a ( A ):
'''simple docstring'''
return all(number % divisor != 0 for divisor in range(2 , isqrt(A ) + 1 ) )
def __a ( A = 10**6 ):
'''simple docstring'''
lowercase__ = 0
lowercase__ = 1
lowercase__ = 7
while prime_candidate < max_prime:
primes_count += is_prime(A )
cube_index += 1
prime_candidate += 6 * cube_index
return primes_count
if __name__ == "__main__":
print(F'{solution() = }')
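# The candidates 7, 19, 37, 61, ... are differences of consecutive cubes,
# (n + 1)**3 - n**3 = 3*n**2 + 3*n + 1, generated incrementally by adding
# 6 * cube_index at each step. Counting the prime ones below 10**6 gives 173
# (Project Euler 131).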
| 704 |
"""simple docstring"""
import tempfile
import torch
from diffusers import IPNDMScheduler
from .test_schedulers import SchedulerCommonTest
class a__ ( _a ):
snake_case_ = (IPNDMScheduler,)
snake_case_ = (("num_inference_steps", 50),)
def snake_case__ ( self, **_UpperCAmelCase ):
'''simple docstring'''
lowercase__ = {"num_train_timesteps": 1000}
config.update(**_UpperCAmelCase )
return config
def snake_case__ ( self, _UpperCAmelCase=0, **_UpperCAmelCase ):
'''simple docstring'''
lowercase__ = dict(self.forward_default_kwargs )
lowercase__ = kwargs.pop("num_inference_steps", _UpperCAmelCase )
lowercase__ = self.dummy_sample
lowercase__ = 0.1 * sample
lowercase__ = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
lowercase__ = self.get_scheduler_config(**_UpperCAmelCase )
lowercase__ = scheduler_class(**_UpperCAmelCase )
scheduler.set_timesteps(_UpperCAmelCase )
# copy over dummy past residuals
lowercase__ = dummy_past_residuals[:]
if time_step is None:
lowercase__ = scheduler.timesteps[len(scheduler.timesteps ) // 2]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(_UpperCAmelCase )
lowercase__ = scheduler_class.from_pretrained(_UpperCAmelCase )
new_scheduler.set_timesteps(_UpperCAmelCase )
# copy over dummy past residuals
lowercase__ = dummy_past_residuals[:]
lowercase__ = scheduler.step(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, **_UpperCAmelCase ).prev_sample
lowercase__ = new_scheduler.step(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, **_UpperCAmelCase ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
lowercase__ = scheduler.step(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, **_UpperCAmelCase ).prev_sample
lowercase__ = new_scheduler.step(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, **_UpperCAmelCase ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def snake_case__ ( self ):
'''simple docstring'''
pass
def snake_case__ ( self, _UpperCAmelCase=0, **_UpperCAmelCase ):
'''simple docstring'''
lowercase__ = dict(self.forward_default_kwargs )
lowercase__ = kwargs.pop("num_inference_steps", _UpperCAmelCase )
lowercase__ = self.dummy_sample
lowercase__ = 0.1 * sample
lowercase__ = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
lowercase__ = self.get_scheduler_config()
lowercase__ = scheduler_class(**_UpperCAmelCase )
scheduler.set_timesteps(_UpperCAmelCase )
# copy over dummy past residuals (must be after setting timesteps)
lowercase__ = dummy_past_residuals[:]
if time_step is None:
lowercase__ = scheduler.timesteps[len(scheduler.timesteps ) // 2]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(_UpperCAmelCase )
lowercase__ = scheduler_class.from_pretrained(_UpperCAmelCase )
# copy over dummy past residuals
new_scheduler.set_timesteps(_UpperCAmelCase )
# copy over dummy past residual (must be after setting timesteps)
lowercase__ = dummy_past_residuals[:]
lowercase__ = scheduler.step(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, **_UpperCAmelCase ).prev_sample
lowercase__ = new_scheduler.step(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, **_UpperCAmelCase ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
lowercase__ = scheduler.step(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, **_UpperCAmelCase ).prev_sample
lowercase__ = new_scheduler.step(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, **_UpperCAmelCase ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def snake_case__ ( self, **_UpperCAmelCase ):
'''simple docstring'''
lowercase__ = self.scheduler_classes[0]
lowercase__ = self.get_scheduler_config(**_UpperCAmelCase )
lowercase__ = scheduler_class(**_UpperCAmelCase )
lowercase__ = 10
lowercase__ = self.dummy_model()
lowercase__ = self.dummy_sample_deter
scheduler.set_timesteps(_UpperCAmelCase )
for i, t in enumerate(scheduler.timesteps ):
lowercase__ = model(_UpperCAmelCase, _UpperCAmelCase )
lowercase__ = scheduler.step(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase ).prev_sample
for i, t in enumerate(scheduler.timesteps ):
lowercase__ = model(_UpperCAmelCase, _UpperCAmelCase )
lowercase__ = scheduler.step(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase ).prev_sample
return sample
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = dict(self.forward_default_kwargs )
lowercase__ = kwargs.pop("num_inference_steps", _UpperCAmelCase )
for scheduler_class in self.scheduler_classes:
lowercase__ = self.get_scheduler_config()
lowercase__ = scheduler_class(**_UpperCAmelCase )
lowercase__ = self.dummy_sample
lowercase__ = 0.1 * sample
if num_inference_steps is not None and hasattr(_UpperCAmelCase, "set_timesteps" ):
scheduler.set_timesteps(_UpperCAmelCase )
elif num_inference_steps is not None and not hasattr(_UpperCAmelCase, "set_timesteps" ):
lowercase__ = num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
lowercase__ = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
lowercase__ = dummy_past_residuals[:]
lowercase__ = scheduler.timesteps[5]
lowercase__ = scheduler.timesteps[6]
lowercase__ = scheduler.step(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, **_UpperCAmelCase ).prev_sample
lowercase__ = scheduler.step(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, **_UpperCAmelCase ).prev_sample
self.assertEqual(output_a.shape, sample.shape )
self.assertEqual(output_a.shape, output_a.shape )
lowercase__ = scheduler.step(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, **_UpperCAmelCase ).prev_sample
lowercase__ = scheduler.step(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, **_UpperCAmelCase ).prev_sample
self.assertEqual(output_a.shape, sample.shape )
self.assertEqual(output_a.shape, output_a.shape )
def snake_case__ ( self ):
'''simple docstring'''
for timesteps in [100, 1000]:
self.check_over_configs(num_train_timesteps=_UpperCAmelCase, time_step=_UpperCAmelCase )
def snake_case__ ( self ):
'''simple docstring'''
for t, num_inference_steps in zip([1, 5, 10], [10, 50, 100] ):
self.check_over_forward(num_inference_steps=_UpperCAmelCase, time_step=_UpperCAmelCase )
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = self.full_loop()
lowercase__ = torch.mean(torch.abs(_UpperCAmelCase ) )
assert abs(result_mean.item() - 254_0529 ) < 10
| 668 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
lowerCAmelCase_: Tuple = {
"configuration_longformer": [
"LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
"LongformerConfig",
"LongformerOnnxConfig",
],
"tokenization_longformer": ["LongformerTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_: str = ["LongformerTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_: List[Any] = [
"LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"LongformerForMaskedLM",
"LongformerForMultipleChoice",
"LongformerForQuestionAnswering",
"LongformerForSequenceClassification",
"LongformerForTokenClassification",
"LongformerModel",
"LongformerPreTrainedModel",
"LongformerSelfAttention",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_: Optional[Any] = [
"TF_LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFLongformerForMaskedLM",
"TFLongformerForMultipleChoice",
"TFLongformerForQuestionAnswering",
"TFLongformerForSequenceClassification",
"TFLongformerForTokenClassification",
"TFLongformerModel",
"TFLongformerPreTrainedModel",
"TFLongformerSelfAttention",
]
if TYPE_CHECKING:
from .configuration_longformer import (
LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
LongformerConfig,
LongformerOnnxConfig,
)
from .tokenization_longformer import LongformerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_longformer_fast import LongformerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_longformer import (
LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
LongformerForMaskedLM,
LongformerForMultipleChoice,
LongformerForQuestionAnswering,
LongformerForSequenceClassification,
LongformerForTokenClassification,
LongformerModel,
LongformerPreTrainedModel,
LongformerSelfAttention,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_longformer import (
TF_LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFLongformerForMaskedLM,
TFLongformerForMultipleChoice,
TFLongformerForQuestionAnswering,
TFLongformerForSequenceClassification,
TFLongformerForTokenClassification,
TFLongformerModel,
TFLongformerPreTrainedModel,
TFLongformerSelfAttention,
)
else:
import sys
lowerCAmelCase_: Optional[int] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
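# Effect of the _LazyModule pattern above (sketch): importing the package is
# cheap, and heavy submodules load only on first attribute access, e.g.
#   import transformers.models.longformer as longformer   # no torch/tf import yet
#   longformer.LongformerModel                             # now modeling_longformer loads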
| 705 |
"""simple docstring"""
import json
import os
import unittest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class a__ ( _a , unittest.TestCase ):
snake_case_ = MgpstrTokenizer
snake_case_ = False
snake_case_ = {}
snake_case_ = False
def snake_case__ ( self ):
'''simple docstring'''
super().setUp()
# fmt: off
lowercase__ = ["[GO]", "[s]", "0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z"]
# fmt: on
lowercase__ = dict(zip(_UpperCAmelCase, range(len(_UpperCAmelCase ) ) ) )
lowercase__ = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"] )
with open(self.vocab_file, "w", encoding="utf-8" ) as fp:
fp.write(json.dumps(_UpperCAmelCase ) + "\n" )
def snake_case__ ( self, **_UpperCAmelCase ):
'''simple docstring'''
return MgpstrTokenizer.from_pretrained(self.tmpdirname, **_UpperCAmelCase )
def snake_case__ ( self, _UpperCAmelCase ):
'''simple docstring'''
lowercase__ = "tester"
lowercase__ = "tester"
return input_text, output_text
@unittest.skip("MGP-STR always lower cases letters." )
def snake_case__ ( self ):
'''simple docstring'''
pass
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = self.get_tokenizers(do_lower_case=_UpperCAmelCase )
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
lowercase__ = "[SPECIAL_TOKEN]"
tokenizer.add_special_tokens({"cls_token": special_token} )
lowercase__ = tokenizer.encode([special_token], add_special_tokens=_UpperCAmelCase )
self.assertEqual(len(_UpperCAmelCase ), 1 )
lowercase__ = tokenizer.decode(_UpperCAmelCase, skip_special_tokens=_UpperCAmelCase )
self.assertTrue(special_token not in decoded )
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
lowercase__ , lowercase__ = self.get_input_output_texts(_UpperCAmelCase )
lowercase__ = tokenizer.tokenize(_UpperCAmelCase )
lowercase__ = tokenizer.convert_tokens_to_ids(_UpperCAmelCase )
lowercase__ = tokenizer.encode(_UpperCAmelCase, add_special_tokens=_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase, _UpperCAmelCase )
lowercase__ = tokenizer.convert_ids_to_tokens(_UpperCAmelCase )
self.assertNotEqual(len(_UpperCAmelCase ), 0 )
lowercase__ = tokenizer.decode(_UpperCAmelCase )
self.assertIsInstance(_UpperCAmelCase, _UpperCAmelCase )
self.assertEqual(text_a.replace(" ", "" ), _UpperCAmelCase )
@unittest.skip("MGP-STR tokenizer only handles one sequence." )
def snake_case__ ( self ):
'''simple docstring'''
pass
@unittest.skip("inputs cannot be pretokenized in MgpstrTokenizer" )
def snake_case__ ( self ):
'''simple docstring'''
pass
| 668 | 0 |
"""simple docstring"""
from __future__ import annotations
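# Polygon feasibility: side lengths can close into a polygon iff the longest
# side is strictly shorter than the sum of all the others (a generalized
# triangle inequality); sorting puts the longest side at copy_nums[-1].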
def __a ( A ):
'''simple docstring'''
if len(A ) < 2:
raise ValueError("Monogons and Digons are not polygons in the Euclidean space" )
if any(i <= 0 for i in nums ):
raise ValueError("All values must be greater than 0" )
lowercase__ = nums.copy()
copy_nums.sort()
return copy_nums[-1] < sum(copy_nums[:-1] )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 706 |
"""simple docstring"""
from ...utils import is_note_seq_available, is_transformers_available, is_torch_available
from ...utils import OptionalDependencyNotAvailable
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .notes_encoder import SpectrogramNotesEncoder
from .continous_encoder import SpectrogramContEncoder
from .pipeline_spectrogram_diffusion import (
SpectrogramDiffusionPipeline,
TaFilmDecoder,
)
try:
if not (is_transformers_available() and is_torch_available() and is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_transformers_and_torch_and_note_seq_objects import * # noqa F403
else:
from .midi_utils import MidiProcessor
| 668 | 0 |
"""simple docstring"""
import collections
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import (
is_pt_flax_cross_test,
require_flax,
require_torch,
require_vision,
slow,
torch_device,
)
from transformers.utils import is_flax_available, is_torch_available, is_vision_available
from ...test_modeling_flax_common import floats_tensor, ids_tensor, random_attention_mask
from ..bert.test_modeling_flax_bert import FlaxBertModelTester
from ..clip.test_modeling_flax_clip import FlaxCLIPVisionModelTester
from ..vit.test_modeling_flax_vit import FlaxViTModelTester
if is_flax_available():
from transformers import (
FlaxBertModel,
FlaxCLIPVisionModel,
FlaxVisionTextDualEncoderModel,
FlaxViTModel,
VisionTextDualEncoderConfig,
VisionTextDualEncoderProcessor,
)
from transformers.modeling_flax_pytorch_utils import (
convert_pytorch_state_dict_to_flax,
load_flax_weights_in_pytorch_model,
)
if is_torch_available():
import torch
from transformers import VisionTextDualEncoderModel
if is_vision_available():
from PIL import Image
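# Normalize a size argument: iterables pass through unchanged, scalars are
# duplicated into an (x, x) pair (used for image_size / patch_size below).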
def __a ( A ):
'''simple docstring'''
if isinstance(A , collections.abc.Iterable ):
return x
return (x, x)
@require_flax
class a__ :
def snake_case__ ( self, _UpperCAmelCase, _UpperCAmelCase ):
'''simple docstring'''
pass
def snake_case__ ( self ):
'''simple docstring'''
pass
def snake_case__ ( self ):
'''simple docstring'''
pass
def snake_case__ ( self, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase ):
'''simple docstring'''
lowercase__ = np.abs((a - b) ).max()
self.assertLessEqual(_UpperCAmelCase, _UpperCAmelCase, F'''Difference between torch and flax is {diff} (>= {tol}).''' )
def snake_case__ ( self, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase=None, **_UpperCAmelCase ):
'''simple docstring'''
lowercase__ = VisionTextDualEncoderConfig.from_vision_text_configs(_UpperCAmelCase, _UpperCAmelCase )
lowercase__ = FlaxVisionTextDualEncoderModel(_UpperCAmelCase )
lowercase__ = model(input_ids=_UpperCAmelCase, pixel_values=_UpperCAmelCase, attention_mask=_UpperCAmelCase )
self.assertEqual(output["text_embeds"].shape, (input_ids.shape[0], config.projection_dim) )
self.assertEqual(output["image_embeds"].shape, (pixel_values.shape[0], config.projection_dim) )
def snake_case__ ( self, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase=None, **_UpperCAmelCase ):
'''simple docstring'''
lowercase__ , lowercase__ = self.get_vision_text_model(_UpperCAmelCase, _UpperCAmelCase )
lowercase__ = {"vision_model": vision_model, "text_model": text_model}
lowercase__ = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**_UpperCAmelCase )
lowercase__ = model(input_ids=_UpperCAmelCase, pixel_values=_UpperCAmelCase, attention_mask=_UpperCAmelCase )
self.assertEqual(output["text_embeds"].shape, (input_ids.shape[0], model.config.projection_dim) )
self.assertEqual(output["image_embeds"].shape, (pixel_values.shape[0], model.config.projection_dim) )
def snake_case__ ( self, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase=None, **_UpperCAmelCase ):
'''simple docstring'''
lowercase__ , lowercase__ = self.get_vision_text_model(_UpperCAmelCase, _UpperCAmelCase )
lowercase__ = {"vision_model": vision_model, "text_model": text_model}
lowercase__ = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**_UpperCAmelCase )
lowercase__ = model(input_ids=_UpperCAmelCase, pixel_values=_UpperCAmelCase, attention_mask=_UpperCAmelCase )
lowercase__ = output[0]
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(_UpperCAmelCase )
lowercase__ = FlaxVisionTextDualEncoderModel.from_pretrained(_UpperCAmelCase )
lowercase__ = model(input_ids=_UpperCAmelCase, pixel_values=_UpperCAmelCase, attention_mask=_UpperCAmelCase )
lowercase__ = after_output[0]
lowercase__ = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(_UpperCAmelCase, 1E-3 )
def snake_case__ ( self, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase=None, **_UpperCAmelCase ):
'''simple docstring'''
lowercase__ , lowercase__ = self.get_vision_text_model(_UpperCAmelCase, _UpperCAmelCase )
lowercase__ = {"vision_model": vision_model, "text_model": text_model}
lowercase__ = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**_UpperCAmelCase )
lowercase__ = model(
input_ids=_UpperCAmelCase, pixel_values=_UpperCAmelCase, attention_mask=_UpperCAmelCase, output_attentions=_UpperCAmelCase )
lowercase__ = output.vision_model_output.attentions
self.assertEqual(len(_UpperCAmelCase ), vision_config.num_hidden_layers )
# in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token)
lowercase__ = to_atuple(vision_model.config.image_size )
lowercase__ = to_atuple(vision_model.config.patch_size )
lowercase__ = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
lowercase__ = num_patches + 1
self.assertEqual(vision_attentions[0].shape[-3:], (vision_config.num_attention_heads, seq_len, seq_len) )
lowercase__ = output.text_model_output.attentions
self.assertEqual(len(_UpperCAmelCase ), text_config.num_hidden_layers )
self.assertEqual(
text_attentions[0].shape[-3:], (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]), )
def snake_case__ ( self, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase ):
'''simple docstring'''
pt_model.to(_UpperCAmelCase )
pt_model.eval()
# prepare inputs
lowercase__ = inputs_dict
lowercase__ = {k: torch.tensor(v.tolist() ) for k, v in flax_inputs.items()}
with torch.no_grad():
lowercase__ = pt_model(**_UpperCAmelCase ).to_tuple()
lowercase__ = fx_model(**_UpperCAmelCase ).to_tuple()
self.assertEqual(len(_UpperCAmelCase ), len(_UpperCAmelCase ), "Output lengths differ between Flax and PyTorch" )
for fx_output, pt_output in zip(fx_outputs[:4], pt_outputs[:4] ):
self.assert_almost_equals(_UpperCAmelCase, pt_output.numpy(), 4E-2 )
# PT -> Flax
with tempfile.TemporaryDirectory() as tmpdirname:
pt_model.save_pretrained(_UpperCAmelCase )
lowercase__ = FlaxVisionTextDualEncoderModel.from_pretrained(_UpperCAmelCase, from_pt=_UpperCAmelCase )
lowercase__ = fx_model_loaded(**_UpperCAmelCase ).to_tuple()
self.assertEqual(len(_UpperCAmelCase ), len(_UpperCAmelCase ), "Output lengths differ between Flax and PyTorch" )
for fx_output_loaded, pt_output in zip(fx_outputs_loaded[:4], pt_outputs[:4] ):
self.assert_almost_equals(_UpperCAmelCase, pt_output.numpy(), 4E-2 )
# Flax -> PT
with tempfile.TemporaryDirectory() as tmpdirname:
fx_model.save_pretrained(_UpperCAmelCase )
lowercase__ = VisionTextDualEncoderModel.from_pretrained(_UpperCAmelCase, from_flax=_UpperCAmelCase )
pt_model_loaded.to(_UpperCAmelCase )
pt_model_loaded.eval()
with torch.no_grad():
lowercase__ = pt_model_loaded(**_UpperCAmelCase ).to_tuple()
self.assertEqual(len(_UpperCAmelCase ), len(_UpperCAmelCase ), "Output lengths differ between Flax and PyTorch" )
for fx_output, pt_output_loaded in zip(fx_outputs[:4], pt_outputs_loaded[:4] ):
self.assert_almost_equals(_UpperCAmelCase, pt_output_loaded.numpy(), 4E-2 )
def snake_case__ ( self, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase ):
'''simple docstring'''
lowercase__ = VisionTextDualEncoderConfig.from_vision_text_configs(_UpperCAmelCase, _UpperCAmelCase )
lowercase__ = VisionTextDualEncoderModel(_UpperCAmelCase )
lowercase__ = FlaxVisionTextDualEncoderModel(_UpperCAmelCase )
lowercase__ = convert_pytorch_state_dict_to_flax(pt_model.state_dict(), _UpperCAmelCase )
lowercase__ = fx_state
self.check_pt_flax_equivalence(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase )
def snake_case__ ( self, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase ):
'''simple docstring'''
lowercase__ = VisionTextDualEncoderConfig.from_vision_text_configs(_UpperCAmelCase, _UpperCAmelCase )
lowercase__ = VisionTextDualEncoderModel(_UpperCAmelCase )
lowercase__ = FlaxVisionTextDualEncoderModel(_UpperCAmelCase )
lowercase__ = load_flax_weights_in_pytorch_model(_UpperCAmelCase, fx_model.params )
self.check_pt_flax_equivalence(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase )
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = self.prepare_config_and_inputs()
self.check_model_from_pretrained_configs(**_UpperCAmelCase )
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = self.prepare_config_and_inputs()
self.check_vision_text_dual_encoder_from_pretrained(**_UpperCAmelCase )
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = self.prepare_config_and_inputs()
self.check_save_load(**_UpperCAmelCase )
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = self.prepare_config_and_inputs()
self.check_vision_text_output_attention(**_UpperCAmelCase )
@is_pt_flax_cross_test
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = self.prepare_config_and_inputs()
lowercase__ = config_inputs_dict.pop("vision_config" )
lowercase__ = config_inputs_dict.pop("text_config" )
lowercase__ = config_inputs_dict
self.check_equivalence_pt_to_flax(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase )
self.check_equivalence_flax_to_pt(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase )
@slow
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ , lowercase__ = self.get_pretrained_model_and_inputs()
lowercase__ = model_a(**_UpperCAmelCase )
lowercase__ = outputs[0]
with tempfile.TemporaryDirectory() as tmp_dirname:
model_a.save_pretrained(_UpperCAmelCase )
lowercase__ = FlaxVisionTextDualEncoderModel.from_pretrained(_UpperCAmelCase )
lowercase__ = model_a(**_UpperCAmelCase )
lowercase__ = after_outputs[0]
lowercase__ = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(_UpperCAmelCase, 1E-5 )
@require_flax
class a__ ( _a , unittest.TestCase ):
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(
"hf-internal-testing/tiny-random-vit", "hf-internal-testing/tiny-bert", vision_from_pt=_UpperCAmelCase, text_from_pt=_UpperCAmelCase, )
lowercase__ = 13
lowercase__ = floats_tensor(
[
batch_size,
model.config.vision_config.num_channels,
model.config.vision_config.image_size,
model.config.vision_config.image_size,
] )
lowercase__ = ids_tensor([batch_size, 4], model.config.text_config.vocab_size )
lowercase__ = random_attention_mask([batch_size, 4] )
lowercase__ = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask}
return model, inputs
def snake_case__ ( self, _UpperCAmelCase, _UpperCAmelCase ):
'''simple docstring'''
lowercase__ = FlaxViTModel(_UpperCAmelCase )
lowercase__ = FlaxBertModel(_UpperCAmelCase )
return vision_model, text_model
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = FlaxViTModelTester(self )
lowercase__ = FlaxBertModelTester(self )
lowercase__ = vit_model_tester.prepare_config_and_inputs()
lowercase__ = bert_model_tester.prepare_config_and_inputs()
lowercase__ , lowercase__ = vision_config_and_inputs
lowercase__ , lowercase__ , lowercase__ , lowercase__ = text_config_and_inputs
# make sure that cross attention layers are added
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": attention_mask,
"input_ids": input_ids,
"token_type_ids": token_type_ids,
}
@require_torch
class a__ ( _a , unittest.TestCase ):
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(
"hf-internal-testing/tiny-random-clip", "hf-internal-testing/tiny-bert", vision_from_pt=_UpperCAmelCase, text_from_pt=_UpperCAmelCase, )
lowercase__ = 13
lowercase__ = floats_tensor(
[
batch_size,
model.config.vision_config.num_channels,
model.config.vision_config.image_size,
model.config.vision_config.image_size,
] )
lowercase__ = ids_tensor([batch_size, 4], model.config.text_config.vocab_size )
lowercase__ = random_attention_mask([batch_size, 4] )
lowercase__ = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask}
return model, inputs
def snake_case__ ( self, _UpperCAmelCase, _UpperCAmelCase ):
'''simple docstring'''
lowercase__ = FlaxCLIPVisionModel(_UpperCAmelCase )
lowercase__ = FlaxBertModel(_UpperCAmelCase )
return vision_model, text_model
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = FlaxCLIPVisionModelTester(self )
lowercase__ = FlaxBertModelTester(self )
lowercase__ = clip_model_tester.prepare_config_and_inputs()
lowercase__ = bert_model_tester.prepare_config_and_inputs()
lowercase__ , lowercase__ = vision_config_and_inputs
lowercase__ , lowercase__ , lowercase__ , lowercase__ = text_config_and_inputs
# make sure that cross attention layers are added
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": attention_mask,
"input_ids": input_ids,
"token_type_ids": token_type_ids,
}
@require_flax
@require_vision
class a__ ( unittest.TestCase ):
@slow
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = FlaxVisionTextDualEncoderModel.from_pretrained("clip-italian/clip-italian", logit_scale_init_value=1.0 )
lowercase__ = VisionTextDualEncoderProcessor.from_pretrained("clip-italian/clip-italian" )
lowercase__ = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
lowercase__ = processor(
text=["una foto di un gatto", "una foto di un cane"], images=_UpperCAmelCase, padding=_UpperCAmelCase, return_tensors="np" )
lowercase__ = model(**_UpperCAmelCase )
# verify the logits
self.assertEqual(outputs.logits_per_image.shape, (inputs.pixel_values.shape[0], inputs.input_ids.shape[0]) )
self.assertEqual(
outputs.logits_per_text.shape, (inputs.input_ids.shape[0], inputs.pixel_values.shape[0]), )
lowercase__ = np.array([[1.2_284_727, 0.3_104_122]] )
self.assertTrue(np.allclose(outputs.logits_per_image, _UpperCAmelCase, atol=1E-3 ) )
| 707 |
"""simple docstring"""
from typing import Any
import numpy as np
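# Hermitian check: a matrix is Hermitian iff it equals its own conjugate
# transpose (M == M*).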
def __a ( A ):
'''simple docstring'''
return np.array_equal(A , matrix.conjugate().T )
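# Rayleigh quotient R(M, v) = (v* @ M @ v) / (v* @ v); for a Hermitian M the
# result is real and bounded by M's smallest and largest eigenvalues.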
def __a ( A , A ):
'''simple docstring'''
lowercase__ = v.conjugate().T
lowercase__ = v_star.dot(A )
assert isinstance(A , np.ndarray )
return (v_star_dot.dot(A )) / (v_star.dot(A ))
def __a ( ):
'''simple docstring'''
lowercase__ = np.array([[2, 2 + 1j, 4], [2 - 1j, 3, 1j], [4, -1j, 1]] )
lowercase__ = np.array([[1], [2], [3]] )
assert is_hermitian(A ), f'''{a} is not hermitian.'''
print(rayleigh_quotient(A , A ) )
lowercase__ = np.array([[1, 2, 4], [2, 3, -1], [4, -1, 1]] )
assert is_hermitian(A ), f'''{a} is not hermitian.'''
assert rayleigh_quotient(A , A ) == float(3 )
if __name__ == "__main__":
import doctest
doctest.testmod()
tests()
| 668 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase_: List[Any] = logging.get_logger(__name__)
lowerCAmelCase_: Optional[int] = {
"microsoft/swinv2-tiny-patch4-window8-256": (
"https://huggingface.co/microsoft/swinv2-tiny-patch4-window8-256/resolve/main/config.json"
),
}
class a__ ( _a ):
snake_case_ = "swinv2"
snake_case_ = {
"num_attention_heads": "num_heads",
"num_hidden_layers": "num_layers",
}
def __init__( self, _UpperCAmelCase=224, _UpperCAmelCase=4, _UpperCAmelCase=3, _UpperCAmelCase=96, _UpperCAmelCase=[2, 2, 6, 2], _UpperCAmelCase=[3, 6, 12, 24], _UpperCAmelCase=7, _UpperCAmelCase=4.0, _UpperCAmelCase=True, _UpperCAmelCase=0.0, _UpperCAmelCase=0.0, _UpperCAmelCase=0.1, _UpperCAmelCase="gelu", _UpperCAmelCase=False, _UpperCAmelCase=0.02, _UpperCAmelCase=1E-5, _UpperCAmelCase=32, **_UpperCAmelCase, ):
'''simple docstring'''
super().__init__(**_UpperCAmelCase )
lowercase__ = image_size
lowercase__ = patch_size
lowercase__ = num_channels
lowercase__ = embed_dim
lowercase__ = depths
lowercase__ = len(_UpperCAmelCase )
lowercase__ = num_heads
lowercase__ = window_size
lowercase__ = mlp_ratio
lowercase__ = qkv_bias
lowercase__ = hidden_dropout_prob
lowercase__ = attention_probs_dropout_prob
lowercase__ = drop_path_rate
lowercase__ = hidden_act
lowercase__ = use_absolute_embeddings
lowercase__ = layer_norm_eps
lowercase__ = initializer_range
lowercase__ = encoder_stride
# we set the hidden_size attribute in order to make Swinv2 work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
lowercase__ = int(embed_dim * 2 ** (len(_UpperCAmelCase ) - 1) )
lowercase__ = (0, 0, 0, 0)
| 708 |
"""simple docstring"""
import gc
import inspect
import unittest
import torch
from parameterized import parameterized
from diffusers import PriorTransformer
from diffusers.utils import floats_tensor, slow, torch_all_close, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin
enable_full_determinism()
class a__ ( _a , unittest.TestCase ):
snake_case_ = PriorTransformer
snake_case_ = "hidden_states"
@property
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = 4
lowercase__ = 8
lowercase__ = 7
lowercase__ = floats_tensor((batch_size, embedding_dim) ).to(_UpperCAmelCase )
lowercase__ = floats_tensor((batch_size, embedding_dim) ).to(_UpperCAmelCase )
lowercase__ = floats_tensor((batch_size, num_embeddings, embedding_dim) ).to(_UpperCAmelCase )
return {
"hidden_states": hidden_states,
"timestep": 2,
"proj_embedding": proj_embedding,
"encoder_hidden_states": encoder_hidden_states,
}
def snake_case__ ( self, _UpperCAmelCase=0 ):
'''simple docstring'''
torch.manual_seed(_UpperCAmelCase )
lowercase__ = 4
lowercase__ = 8
lowercase__ = 7
lowercase__ = torch.randn((batch_size, embedding_dim) ).to(_UpperCAmelCase )
lowercase__ = torch.randn((batch_size, embedding_dim) ).to(_UpperCAmelCase )
lowercase__ = torch.randn((batch_size, num_embeddings, embedding_dim) ).to(_UpperCAmelCase )
return {
"hidden_states": hidden_states,
"timestep": 2,
"proj_embedding": proj_embedding,
"encoder_hidden_states": encoder_hidden_states,
}
@property
def snake_case__ ( self ):
'''simple docstring'''
return (4, 8)
@property
def snake_case__ ( self ):
'''simple docstring'''
return (4, 8)
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = {
"num_attention_heads": 2,
"attention_head_dim": 4,
"num_layers": 2,
"embedding_dim": 8,
"num_embeddings": 7,
"additional_embeddings": 4,
}
lowercase__ = self.dummy_input
return init_dict, inputs_dict
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ , lowercase__ = PriorTransformer.from_pretrained(
"hf-internal-testing/prior-dummy", output_loading_info=_UpperCAmelCase )
self.assertIsNotNone(_UpperCAmelCase )
self.assertEqual(len(loading_info["missing_keys"] ), 0 )
model.to(_UpperCAmelCase )
lowercase__ = model(**self.dummy_input )[0]
assert hidden_states is not None, "Make sure output is not None"
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ , lowercase__ = self.prepare_init_args_and_inputs_for_common()
lowercase__ = self.model_class(**_UpperCAmelCase )
lowercase__ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowercase__ = [*signature.parameters.keys()]
lowercase__ = ["hidden_states", "timestep"]
self.assertListEqual(arg_names[:2], _UpperCAmelCase )
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = PriorTransformer.from_pretrained("hf-internal-testing/prior-dummy" )
lowercase__ = model.to(_UpperCAmelCase )
if hasattr(_UpperCAmelCase, "set_default_attn_processor" ):
model.set_default_attn_processor()
lowercase__ = self.get_dummy_seed_input()
with torch.no_grad():
lowercase__ = model(**_UpperCAmelCase )[0]
lowercase__ = output[0, :5].flatten().cpu()
print(_UpperCAmelCase )
# Since the VAE Gaussian prior's generator is seeded on the appropriate device,
# the expected output slices are not the same for CPU and GPU.
lowercase__ = torch.tensor([-1.3_436, -0.2_870, 0.7_538, 0.4_368, -0.0_239] )
self.assertTrue(torch_all_close(_UpperCAmelCase, _UpperCAmelCase, rtol=1E-2 ) )
@slow
class a__ ( unittest.TestCase ):
def snake_case__ ( self, _UpperCAmelCase=1, _UpperCAmelCase=768, _UpperCAmelCase=77, _UpperCAmelCase=0 ):
'''simple docstring'''
torch.manual_seed(_UpperCAmelCase )
lowercase__ = batch_size
lowercase__ = embedding_dim
lowercase__ = num_embeddings
lowercase__ = torch.randn((batch_size, embedding_dim) ).to(_UpperCAmelCase )
lowercase__ = torch.randn((batch_size, embedding_dim) ).to(_UpperCAmelCase )
lowercase__ = torch.randn((batch_size, num_embeddings, embedding_dim) ).to(_UpperCAmelCase )
return {
"hidden_states": hidden_states,
"timestep": 2,
"proj_embedding": proj_embedding,
"encoder_hidden_states": encoder_hidden_states,
}
def snake_case__ ( self ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@parameterized.expand(
[
# fmt: off
[13, [-0.5_861, 0.1_283, -0.0_931, 0.0_882, 0.4_476, 0.1_329, -0.0_498, 0.0_640]],
[37, [-0.4_913, 0.0_110, -0.0_483, 0.0_541, 0.4_954, -0.0_170, 0.0_354, 0.1_651]],
# fmt: on
] )
def snake_case__ ( self, _UpperCAmelCase, _UpperCAmelCase ):
'''simple docstring'''
lowercase__ = PriorTransformer.from_pretrained("kandinsky-community/kandinsky-2-1-prior", subfolder="prior" )
model.to(_UpperCAmelCase )
lowercase__ = self.get_dummy_seed_input(seed=_UpperCAmelCase )
with torch.no_grad():
lowercase__ = model(**_UpperCAmelCase )[0]
assert list(sample.shape ) == [1, 768]
lowercase__ = sample[0, :8].flatten().cpu()
print(_UpperCAmelCase )
lowercase__ = torch.tensor(_UpperCAmelCase )
assert torch_all_close(_UpperCAmelCase, _UpperCAmelCase, atol=1E-3 )
| 668 | 0 |
"""simple docstring"""
from __future__ import annotations
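# A sequence is duplicate-free iff casting it to a set preserves its length.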
def __a ( A ):
'''simple docstring'''
return len(set(A ) ) == len(A )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 709 |
"""simple docstring"""
lowerCAmelCase_: Any = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/"
def __a ( A ):
'''simple docstring'''
if not isinstance(A , A ):
lowercase__ = f'''a bytes-like object is required, not \'{data.__class__.__name__}\''''
raise TypeError(A )
lowercase__ = "".join(bin(A )[2:].zfill(8 ) for byte in data )
lowercase__ = len(A ) % 6 != 0
if padding_needed:
# The padding that will be added later
lowercase__ = b"=" * ((6 - len(A ) % 6) // 2)
# Append binary_stream with arbitrary binary digits (0's by default) to make its
# length a multiple of 6.
binary_stream += "0" * (6 - len(A ) % 6)
else:
lowercase__ = b""
# Encode every 6 binary digits to their corresponding Base64 character
return (
"".join(
B64_CHARSET[int(binary_stream[index : index + 6] , 2 )]
for index in range(0 , len(A ) , 6 ) ).encode()
+ padding
)
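# Illustration: b"ABC" is 0x41 0x42 0x43 -> 6-bit groups 010000 010100 001001
# 000011 -> indices 16, 20, 9, 3 -> b"QUJD", matching base64.b64encode(b"ABC").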
def __a ( A ):
'''simple docstring'''
if not isinstance(A , A ) and not isinstance(A , A ):
lowercase__ = (
"argument should be a bytes-like object or ASCII string, "
f'''not \'{encoded_data.__class__.__name__}\''''
)
raise TypeError(A )
# In case encoded_data is a bytes-like object, make sure it contains only
# ASCII characters so we convert it to a string object
if isinstance(A , A ):
try:
lowercase__ = encoded_data.decode("utf-8" )
except UnicodeDecodeError:
raise ValueError("base64 encoded data should only contain ASCII characters" )
lowercase__ = encoded_data.count("=" )
# Check if the encoded string contains non base64 characters
if padding:
assert all(
char in B64_CHARSET for char in encoded_data[:-padding] ), "Invalid base64 character(s) found."
else:
assert all(
char in B64_CHARSET for char in encoded_data ), "Invalid base64 character(s) found."
# Check the padding
assert len(A ) % 4 == 0 and padding < 3, "Incorrect padding"
if padding:
# Remove padding if there is one
lowercase__ = encoded_data[:-padding]
lowercase__ = "".join(
bin(B64_CHARSET.index(A ) )[2:].zfill(6 ) for char in encoded_data )[: -padding * 2]
else:
lowercase__ = "".join(
bin(B64_CHARSET.index(A ) )[2:].zfill(6 ) for char in encoded_data )
lowercase__ = [
int(binary_stream[index : index + 8] , 2 )
for index in range(0 , len(A ) , 8 )
]
return bytes(A )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 668 | 0 |
"""simple docstring"""
import argparse
import json
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
lowerCAmelCase_: List[str] = 1_6
lowerCAmelCase_: Optional[Any] = 3_2
def __a ( A , A = 16 , A = "bert-base-cased" ):
'''simple docstring'''
lowercase__ = AutoTokenizer.from_pretrained(A )
lowercase__ = load_dataset("glue" , "mrpc" )
def tokenize_function(A ):
# max_length=None => use the model max length (it's actually the default)
lowercase__ = tokenizer(examples["sentence1"] , examples["sentence2"] , truncation=A , max_length=A )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
lowercase__ = datasets.map(
A , batched=A , remove_columns=["idx", "sentence1", "sentence2"] , load_from_cache_file=A )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
lowercase__ = tokenized_datasets.rename_column("label" , "labels" )
def collate_fn(A ):
# On TPU it's best to pad everything to the same length or training will be very slow.
if accelerator.distributed_type == DistributedType.TPU:
return tokenizer.pad(A , padding="max_length" , max_length=1_28 , return_tensors="pt" )
return tokenizer.pad(A , padding="longest" , return_tensors="pt" )
# Instantiate dataloaders.
lowercase__ = DataLoader(
tokenized_datasets["train"] , shuffle=A , collate_fn=A , batch_size=A )
lowercase__ = DataLoader(
tokenized_datasets["validation"] , shuffle=A , collate_fn=A , batch_size=A )
return train_dataloader, eval_dataloader
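# Training entry point: a DeepSpeed-aware setup that swaps in Accelerate's
# DummyOptim / DummyScheduler whenever the DeepSpeed config already declares an
# optimizer or scheduler, so the real ones are built by the DeepSpeed engine.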
def __a ( A , A ):
'''simple docstring'''
lowercase__ = Accelerator()
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
lowercase__ = config["lr"]
lowercase__ = int(config["num_epochs"] )
lowercase__ = int(config["seed"] )
lowercase__ = int(config["batch_size"] )
lowercase__ = args.model_name_or_path
set_seed(A )
lowercase__ , lowercase__ = get_dataloaders(A , A , A )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
lowercase__ = AutoModelForSequenceClassification.from_pretrained(A , return_dict=A )
# Instantiate optimizer
lowercase__ = (
AdamW
if accelerator.state.deepspeed_plugin is None
or "optimizer" not in accelerator.state.deepspeed_plugin.deepspeed_config
else DummyOptim
)
lowercase__ = optimizer_cls(params=model.parameters() , lr=A )
if accelerator.state.deepspeed_plugin is not None:
lowercase__ = accelerator.state.deepspeed_plugin.deepspeed_config[
"gradient_accumulation_steps"
]
else:
lowercase__ = 1
lowercase__ = (len(A ) * num_epochs) // gradient_accumulation_steps
# Instantiate scheduler
if (
accelerator.state.deepspeed_plugin is None
or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
):
lowercase__ = get_linear_schedule_with_warmup(
optimizer=A , num_warmup_steps=0 , num_training_steps=A , )
else:
lowercase__ = DummyScheduler(A , total_num_steps=A , warmup_num_steps=0 )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ = accelerator.prepare(
A , A , A , A , A )
# We need to keep track of how many total steps we have iterated over
lowercase__ = 0
# We also need to keep track of the stating epoch so files are named properly
lowercase__ = 0
# Now we train the model
lowercase__ = evaluate.load("glue" , "mrpc" )
lowercase__ = 0
lowercase__ = {}
for epoch in range(A , A ):
model.train()
for step, batch in enumerate(A ):
lowercase__ = model(**A )
lowercase__ = outputs.loss
lowercase__ = loss / gradient_accumulation_steps
accelerator.backward(A )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
overall_step += 1
model.eval()
lowercase__ = 0
for step, batch in enumerate(A ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
lowercase__ = model(**A )
lowercase__ = outputs.logits.argmax(dim=-1 )
# It is slightly faster to call this once, than multiple times
lowercase__ , lowercase__ = accelerator.gather(
(predictions, batch["labels"]) ) # If we are in a multiprocess environment, the last batch has duplicates
if accelerator.use_distributed:
if step == len(A ) - 1:
lowercase__ = predictions[: len(eval_dataloader.dataset ) - samples_seen]
lowercase__ = references[: len(eval_dataloader.dataset ) - samples_seen]
else:
samples_seen += references.shape[0]
metric.add_batch(
predictions=A , references=A , )
lowercase__ = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(f'''epoch {epoch}:''' , A )
lowercase__ = eval_metric["accuracy"]
if best_performance < eval_metric["accuracy"]:
lowercase__ = eval_metric["accuracy"]
if args.performance_lower_bound is not None:
assert (
args.performance_lower_bound <= best_performance
), f'''Best performance metric {best_performance} is lower than the lower bound {args.performance_lower_bound}'''
accelerator.wait_for_everyone()
if accelerator.is_main_process:
with open(os.path.join(args.output_dir , "all_results.json" ) , "w" ) as f:
json.dump(A , A )
def __a ( ):
'''simple docstring'''
    lowercase__ = argparse.ArgumentParser(description="Simple example of a training script with DeepSpeed integration that tracks a performance metric." )
parser.add_argument(
"--model_name_or_path" , type=A , default="bert-base-cased" , help="Path to pretrained model or model identifier from huggingface.co/models." , required=A , )
parser.add_argument(
"--output_dir" , type=A , default="." , help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory." , )
parser.add_argument(
"--performance_lower_bound" , type=A , default=A , help="Optional lower bound for the performance metric. If set, the training will throw error when the performance metric drops below this value." , )
parser.add_argument(
"--num_epochs" , type=A , default=3 , help="Number of train epochs." , )
lowercase__ = parser.parse_args()
lowercase__ = {"lr": 2e-5, "num_epochs": args.num_epochs, "seed": 42, "batch_size": 16}
training_function(A , A )
if __name__ == "__main__":
main()
| 710 |
"""simple docstring"""
from sympy import diff, lambdify, symbols
from sympy.functions import * # noqa: F403
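# Newton-Raphson with multiplicity: x_{n+1} = x_n - m * f(x_n) / f'(x_n); using
# the multiplicity m keeps convergence fast at repeated roots.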
def __a ( A , A , A = "x" , A = 10**-10 , A = 1 , ):
'''simple docstring'''
lowercase__ = symbols(A )
lowercase__ = lambdify(A , A )
lowercase__ = lambdify(A , diff(A , A ) )
lowercase__ = starting_point
while True:
if diff_function(A ) != 0:
lowercase__ = prev_guess - multiplicity * func(A ) / diff_function(
A )
else:
raise ZeroDivisionError("Could not find root" ) from None
# Precision is checked by comparing the difference of consecutive guesses
if abs(next_guess - prev_guess ) < precision:
return next_guess
lowercase__ = next_guess
# Let's Execute
if __name__ == "__main__":
# Find root of trigonometric function
# Find value of pi
print(F'The root of sin(x) = 0 is {newton_raphson("sin(x)", 2)}')
# Find root of polynomial
# Find fourth Root of 5
print(F'The root of x**4 - 5 = 0 is {newton_raphson("x**4 -5", 0.4 +5j)}')
# Find value of e
print(
"The root of log(y) - 1 = 0 is ",
F'{newton_raphson("log(y) - 1", 2, variable="y")}',
)
# Exponential Roots
print(
"The root of exp(x) - 1 = 0 is",
F'{newton_raphson("exp(x) - 1", 1_0, precision=0.005)}',
)
# Find root of cos(x)
print(F'The root of cos(x) = 0 is {newton_raphson("cos(x)", 0)}')
| 668 | 0 |
"""simple docstring"""
from __future__ import annotations
from typing import Dict
from ...configuration_utils import PretrainedConfig
lowerCAmelCase_: Dict = {
"susnato/ernie-m-base_pytorch": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/config.json",
"susnato/ernie-m-large_pytorch": "https://huggingface.co/susnato/ernie-m-large_pytorch/blob/main/config.json",
}
class a__ ( _a ):
snake_case_ = "ernie_m"
snake_case_ = {"dropout": "classifier_dropout", "num_classes": "num_labels"}
def __init__( self, _UpperCAmelCase = 25_0002, _UpperCAmelCase = 768, _UpperCAmelCase = 12, _UpperCAmelCase = 12, _UpperCAmelCase = 3072, _UpperCAmelCase = "gelu", _UpperCAmelCase = 0.1, _UpperCAmelCase = 0.1, _UpperCAmelCase = 514, _UpperCAmelCase = 0.02, _UpperCAmelCase = 1, _UpperCAmelCase = 1E-05, _UpperCAmelCase=None, _UpperCAmelCase=False, _UpperCAmelCase=0.0, **_UpperCAmelCase, ):
'''simple docstring'''
super().__init__(pad_token_id=_UpperCAmelCase, **_UpperCAmelCase )
lowercase__ = vocab_size
lowercase__ = hidden_size
lowercase__ = num_hidden_layers
lowercase__ = num_attention_heads
lowercase__ = intermediate_size
lowercase__ = hidden_act
lowercase__ = hidden_dropout_prob
lowercase__ = attention_probs_dropout_prob
lowercase__ = max_position_embeddings
lowercase__ = initializer_range
lowercase__ = layer_norm_eps
lowercase__ = classifier_dropout
lowercase__ = is_decoder
lowercase__ = act_dropout
| 711 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
lowerCAmelCase_: Union[str, Any] = {
"configuration_distilbert": [
"DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP",
"DistilBertConfig",
"DistilBertOnnxConfig",
],
"tokenization_distilbert": ["DistilBertTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_: Union[str, Any] = ["DistilBertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_: Any = [
"DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"DistilBertForMaskedLM",
"DistilBertForMultipleChoice",
"DistilBertForQuestionAnswering",
"DistilBertForSequenceClassification",
"DistilBertForTokenClassification",
"DistilBertModel",
"DistilBertPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_: Tuple = [
"TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFDistilBertForMaskedLM",
"TFDistilBertForMultipleChoice",
"TFDistilBertForQuestionAnswering",
"TFDistilBertForSequenceClassification",
"TFDistilBertForTokenClassification",
"TFDistilBertMainLayer",
"TFDistilBertModel",
"TFDistilBertPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_: Optional[Any] = [
"FlaxDistilBertForMaskedLM",
"FlaxDistilBertForMultipleChoice",
"FlaxDistilBertForQuestionAnswering",
"FlaxDistilBertForSequenceClassification",
"FlaxDistilBertForTokenClassification",
"FlaxDistilBertModel",
"FlaxDistilBertPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_distilbert import (
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
DistilBertConfig,
DistilBertOnnxConfig,
)
from .tokenization_distilbert import DistilBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_distilbert_fast import DistilBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_distilbert import (
DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
DistilBertModel,
DistilBertPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_distilbert import (
TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDistilBertForMaskedLM,
TFDistilBertForMultipleChoice,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertMainLayer,
TFDistilBertModel,
TFDistilBertPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_distilbert import (
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertModel,
FlaxDistilBertPreTrainedModel,
)
else:
import sys
lowerCAmelCase_: Optional[int] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 668 | 0 |
"""simple docstring"""
import copy
from typing import TYPE_CHECKING, Any, Mapping, Optional, OrderedDict
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto.configuration_auto import AutoConfig
if TYPE_CHECKING:
from ... import PreTrainedTokenizerBase, TensorType
lowerCAmelCase_: Optional[Any] = logging.get_logger(__name__)
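# Composite config: pairs an arbitrary vision encoder config with an arbitrary
# text decoder config, rebuilding each through AutoConfig.for_model.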
class a__ ( _a ):
snake_case_ = "vision-encoder-decoder"
snake_case_ = True
def __init__( self, **_UpperCAmelCase ):
'''simple docstring'''
super().__init__(**_UpperCAmelCase )
if "encoder" not in kwargs or "decoder" not in kwargs:
raise ValueError(
                F'''A configuration of type {self.model_type} cannot be instantiated because '''
F'''not both `encoder` and `decoder` sub-configurations are passed, but only {kwargs}''' )
lowercase__ = kwargs.pop("encoder" )
lowercase__ = encoder_config.pop("model_type" )
lowercase__ = kwargs.pop("decoder" )
lowercase__ = decoder_config.pop("model_type" )
lowercase__ = AutoConfig.for_model(_UpperCAmelCase, **_UpperCAmelCase )
lowercase__ = AutoConfig.for_model(_UpperCAmelCase, **_UpperCAmelCase )
lowercase__ = True
@classmethod
def snake_case__ ( cls, _UpperCAmelCase, _UpperCAmelCase, **_UpperCAmelCase ):
'''simple docstring'''
logger.info("Setting `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config" )
lowercase__ = True
lowercase__ = True
return cls(encoder=encoder_config.to_dict(), decoder=decoder_config.to_dict(), **_UpperCAmelCase )
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = copy.deepcopy(self.__dict__ )
lowercase__ = self.encoder.to_dict()
lowercase__ = self.decoder.to_dict()
lowercase__ = self.__class__.model_type
return output
class a__ ( _a ):
snake_case_ = version.parse("1.11" )
@property
def snake_case__ ( self ):
'''simple docstring'''
return OrderedDict(
[
("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
] )
@property
def snake_case__ ( self ):
'''simple docstring'''
return 1E-4
@property
def snake_case__ ( self ):
'''simple docstring'''
return OrderedDict({"last_hidden_state": {0: "batch", 1: "encoder_sequence"}} )
class a__ ( _a ):
@property
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = OrderedDict()
lowercase__ = {0: "batch", 1: "past_decoder_sequence + sequence"}
lowercase__ = {0: "batch", 1: "past_decoder_sequence + sequence"}
lowercase__ = {0: "batch", 1: "encoder_sequence"}
return common_inputs
def snake_case__ ( self, _UpperCAmelCase, _UpperCAmelCase = -1, _UpperCAmelCase = -1, _UpperCAmelCase = False, _UpperCAmelCase = None, ):
'''simple docstring'''
import torch
lowercase__ = OrderedDict()
lowercase__ = super().generate_dummy_inputs(
_UpperCAmelCase, batch_size=_UpperCAmelCase, seq_length=_UpperCAmelCase, is_pair=_UpperCAmelCase, framework=_UpperCAmelCase )
lowercase__ , lowercase__ = dummy_input["input_ids"].shape
lowercase__ = (batch, encoder_sequence, self._config.encoder_hidden_size)
lowercase__ = dummy_input.pop("input_ids" )
lowercase__ = dummy_input.pop("attention_mask" )
lowercase__ = torch.zeros(_UpperCAmelCase )
return common_inputs
class a__ ( _a ):
@property
def snake_case__ ( self ):
'''simple docstring'''
pass
def snake_case__ ( self, _UpperCAmelCase ):
'''simple docstring'''
return VisionEncoderDecoderEncoderOnnxConfig(_UpperCAmelCase )
def snake_case__ ( self, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase = "default" ):
'''simple docstring'''
lowercase__ = encoder_config.hidden_size
return VisionEncoderDecoderDecoderOnnxConfig(_UpperCAmelCase, _UpperCAmelCase )
| 712 |
"""simple docstring"""
from math import ceil
from typing import List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import BatchFeature, SequenceFeatureExtractor
from ...utils import TensorType, logging
lowerCAmelCase_: Union[str, Any] = logging.get_logger(__name__)
class a__ ( _a ):
snake_case_ = ["audio_values", "audio_mask"]
def __init__( self, _UpperCAmelCase=2048, _UpperCAmelCase=1, _UpperCAmelCase=[16, 16], _UpperCAmelCase=128, _UpperCAmelCase=4_4100, _UpperCAmelCase=86, _UpperCAmelCase=2048, _UpperCAmelCase=0.0, **_UpperCAmelCase, ):
'''simple docstring'''
super().__init__(
feature_size=_UpperCAmelCase, sampling_rate=_UpperCAmelCase, padding_value=_UpperCAmelCase, **_UpperCAmelCase, )
lowercase__ = spectrogram_length
lowercase__ = num_channels
lowercase__ = patch_size
lowercase__ = feature_size // self.patch_size[1]
lowercase__ = n_fft
lowercase__ = sampling_rate // hop_length_to_sampling_rate
lowercase__ = sampling_rate
lowercase__ = padding_value
lowercase__ = mel_filter_bank(
num_frequency_bins=1 + n_fft // 2, num_mel_filters=_UpperCAmelCase, min_frequency=0.0, max_frequency=22_050.0, sampling_rate=_UpperCAmelCase, norm="slaney", mel_scale="slaney", ).T
def snake_case__ ( self, _UpperCAmelCase ):
'''simple docstring'''
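        # Log-mel pipeline: Hann-windowed power spectrogram -> mel filter bank
        # in dB, then drop the trailing frame, shift by -20 dB and rescale with
        # clip(x / 40, -2, 0) + 1 so values land in [-1.0, 1.0].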
lowercase__ = spectrogram(
_UpperCAmelCase, window_function(self.n_fft, "hann" ), frame_length=self.n_fft, hop_length=self.hop_length, power=2.0, mel_filters=self.mel_filters.T, log_mel="dB", db_range=80.0, )
lowercase__ = log_spec[:, :-1]
lowercase__ = log_spec - 20.0
lowercase__ = np.clip(log_spec / 40.0, -2.0, 0.0 ) + 1.0
return log_spec
def __call__( self, _UpperCAmelCase, _UpperCAmelCase = None, _UpperCAmelCase = True, _UpperCAmelCase = None, _UpperCAmelCase = False, _UpperCAmelCase = False, **_UpperCAmelCase, ):
'''simple docstring'''
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
"This feature extractor is set to support sampling rate"
F''' of {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled'''
F''' with {self.sampling_rate} and not {sampling_rate}.''' )
else:
logger.warning(
"It is strongly recommended to pass the `sampling_rate` argument to this function. "
"Failing to do so can result in silent errors that might be hard to debug." )
lowercase__ = isinstance(_UpperCAmelCase, np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(F'''Only mono-channel audio is supported for input to {self}''' )
lowercase__ = is_batched_numpy or (
isinstance(_UpperCAmelCase, (list, tuple) ) and (isinstance(raw_speech[0], (np.ndarray, tuple, list) ))
)
if is_batched:
lowercase__ = [np.asarray([speech], dtype=np.floataa ).T for speech in raw_speech]
elif not is_batched and not isinstance(_UpperCAmelCase, np.ndarray ):
lowercase__ = np.asarray(_UpperCAmelCase, dtype=np.floataa )
elif isinstance(_UpperCAmelCase, np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
lowercase__ = raw_speech.astype(np.floataa )
# always return batch
if not is_batched:
lowercase__ = [np.asarray([raw_speech] ).T]
# Convert audio signals to log mel spectrograms, truncate by time axis
lowercase__ = [
self._np_extract_fbank_features(waveform.squeeze() ).T[: self.spectrogram_length] for waveform in raw_speech
]
if isinstance(audio_features[0], _UpperCAmelCase ):
lowercase__ = [np.asarray(_UpperCAmelCase, dtype=np.floataa ) for feature in audio_features]
# Create audio attention mask
lowercase__ = max(
[ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len for feature in audio_features] ) # The maximum number of audio patches in a batch
if return_attention_mask:
lowercase__ = [
(ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [1]
+ (max_patch_len - ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [0]
for feature in audio_features
]
lowercase__ = np.array(_UpperCAmelCase ).astype(np.floataa )
# convert into correct format for padding
lowercase__ = max_patch_len // self.freq_len * self.patch_size[0] # The maximum audio size in a batch
lowercase__ = np.ones([len(_UpperCAmelCase ), 1, max_time_len, self.feature_size] ).astype(np.floataa )
lowercase__ = padded_audio_features * self.padding_value
for i in range(len(_UpperCAmelCase ) ):
lowercase__ = audio_features[i]
lowercase__ = feature
# return as BatchFeature
if return_attention_mask:
lowercase__ = {"audio_values": padded_audio_features, "audio_mask": audio_mask}
else:
lowercase__ = {"audio_values": padded_audio_features}
lowercase__ = BatchFeature(data=_UpperCAmelCase, tensor_type=_UpperCAmelCase )
return encoded_inputs
| 668 | 0 |
"""simple docstring"""
import copy
import fnmatch
import json
import os
import pickle as pkl
import shutil
import sys
import tarfile
import tempfile
from collections import OrderedDict
from contextlib import contextmanager
from functools import partial
from hashlib import shaaaa
from io import BytesIO
from pathlib import Path
from urllib.parse import urlparse
from zipfile import ZipFile, is_zipfile
import cva
import numpy as np
import requests
import wget
from filelock import FileLock
from PIL import Image
from tqdm.auto import tqdm
from yaml import Loader, dump, load
try:
import torch
lowerCAmelCase_: List[str] = True
except ImportError:
lowerCAmelCase_: Dict = False
try:
from torch.hub import _get_torch_home
lowerCAmelCase_: Dict = _get_torch_home()
except ImportError:
lowerCAmelCase_: Union[str, Any] = os.path.expanduser(
os.getenv("TORCH_HOME", os.path.join(os.getenv("XDG_CACHE_HOME", "~/.cache"), "torch"))
)
lowerCAmelCase_: List[str] = os.path.join(torch_cache_home, "transformers")
lowerCAmelCase_: Dict = "https://cdn.huggingface.co"
lowerCAmelCase_: List[str] = "https://s3.amazonaws.com/models.huggingface.co/bert"
lowerCAmelCase_: str = "/".join(str(Path(__file__).resolve()).split("/")[:-1])
lowerCAmelCase_: Any = os.path.join(PATH, "config.yaml")
lowerCAmelCase_: Union[str, Any] = os.path.join(PATH, "attributes.txt")
lowerCAmelCase_: Dict = os.path.join(PATH, "objects.txt")
lowerCAmelCase_: int = os.getenv("PYTORCH_PRETRAINED_BERT_CACHE", default_cache_path)
lowerCAmelCase_: List[str] = os.getenv("PYTORCH_TRANSFORMERS_CACHE", PYTORCH_PRETRAINED_BERT_CACHE)
lowerCAmelCase_: Optional[int] = os.getenv("TRANSFORMERS_CACHE", PYTORCH_TRANSFORMERS_CACHE)
lowerCAmelCase_: Any = "pytorch_model.bin"
lowerCAmelCase_: Optional[int] = "config.yaml"
def __a ( A=OBJECTS , A=ATTRIBUTES ):
'''simple docstring'''
lowercase__ = []
with open(A ) as f:
for object in f.readlines():
vg_classes.append(object.split("," )[0].lower().strip() )
lowercase__ = []
with open(A ) as f:
for object in f.readlines():
vg_attrs.append(object.split("," )[0].lower().strip() )
return vg_classes, vg_attrs
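# Load a Detectron-style pickled checkpoint and convert every numpy array in
# its "model" dict into a torch tensor, preserving key order.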
def __a ( A ):
'''simple docstring'''
lowercase__ = OrderedDict()
with open(A , "rb" ) as f:
lowercase__ = pkl.load(A )["model"]
for k in copy.deepcopy(list(ckp.keys() ) ):
lowercase__ = ckp.pop(A )
if isinstance(A , np.ndarray ):
lowercase__ = torch.tensor(A )
else:
assert isinstance(A , torch.tensor ), type(A )
lowercase__ = v
return r
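# Attribute-style nested config: dict values become child Config objects one
# level deeper, and dotted keys in __setattr__ walk down that tree.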
class a__ :
snake_case_ = {}
def __init__( self, _UpperCAmelCase, _UpperCAmelCase = "root", _UpperCAmelCase=0 ):
'''simple docstring'''
lowercase__ = name
lowercase__ = level
lowercase__ = {}
for k, v in dictionary.items():
if v is None:
raise ValueError()
lowercase__ = copy.deepcopy(_UpperCAmelCase )
lowercase__ = copy.deepcopy(_UpperCAmelCase )
if isinstance(_UpperCAmelCase, _UpperCAmelCase ):
lowercase__ = Config(_UpperCAmelCase, name=_UpperCAmelCase, level=level + 1 )
lowercase__ = v
setattr(self, _UpperCAmelCase, _UpperCAmelCase )
lowercase__ = d
def __repr__( self ):
'''simple docstring'''
return str(list((self._pointer.keys()) ) )
def __setattr__( self, _UpperCAmelCase, _UpperCAmelCase ):
'''simple docstring'''
lowercase__ = val
lowercase__ = val
lowercase__ = key.split("." )
lowercase__ = len(_UpperCAmelCase ) - 1
lowercase__ = self._pointer
if len(_UpperCAmelCase ) > 1:
for i, l in enumerate(_UpperCAmelCase ):
if hasattr(self, _UpperCAmelCase ) and isinstance(getattr(self, _UpperCAmelCase ), _UpperCAmelCase ):
setattr(getattr(self, _UpperCAmelCase ), ".".join(levels[i:] ), _UpperCAmelCase )
if l == last_level:
lowercase__ = val
else:
lowercase__ = pointer[l]
def snake_case__ ( self ):
'''simple docstring'''
return self._pointer
def snake_case__ ( self, _UpperCAmelCase, _UpperCAmelCase ):
'''simple docstring'''
with open(F'''{file_name}''', "w" ) as stream:
dump(_UpperCAmelCase, _UpperCAmelCase )
def snake_case__ ( self, _UpperCAmelCase, _UpperCAmelCase ):
'''simple docstring'''
with open(F'''{file_name}''', "w" ) as stream:
json.dump(_UpperCAmelCase, _UpperCAmelCase )
@staticmethod
def snake_case__ ( _UpperCAmelCase ):
'''simple docstring'''
with open(_UpperCAmelCase ) as stream:
lowercase__ = load(_UpperCAmelCase, Loader=_UpperCAmelCase )
return data
def __str__( self ):
'''simple docstring'''
lowercase__ = " "
if self._name != "root":
lowercase__ = F'''{t * (self._level-1)}{self._name}:\n'''
else:
lowercase__ = ""
lowercase__ = self._level
for i, (k, v) in enumerate(self._pointer.items() ):
if isinstance(_UpperCAmelCase, _UpperCAmelCase ):
r += F'''{t * (self._level)}{v}\n'''
self._level += 1
else:
r += F'''{t * (self._level)}{k}: {v} ({type(_UpperCAmelCase ).__name__})\n'''
lowercase__ = level
return r[:-1]
@classmethod
def snake_case__ ( cls, _UpperCAmelCase, **_UpperCAmelCase ):
'''simple docstring'''
lowercase__ , lowercase__ = cls.get_config_dict(_UpperCAmelCase, **_UpperCAmelCase )
return cls(_UpperCAmelCase )
@classmethod
def snake_case__ ( cls, _UpperCAmelCase, **_UpperCAmelCase ):
'''simple docstring'''
lowercase__ = kwargs.pop("cache_dir", _UpperCAmelCase )
lowercase__ = kwargs.pop("force_download", _UpperCAmelCase )
lowercase__ = kwargs.pop("resume_download", _UpperCAmelCase )
lowercase__ = kwargs.pop("proxies", _UpperCAmelCase )
lowercase__ = kwargs.pop("local_files_only", _UpperCAmelCase )
if os.path.isdir(_UpperCAmelCase ):
lowercase__ = os.path.join(_UpperCAmelCase, _UpperCAmelCase )
elif os.path.isfile(_UpperCAmelCase ) or is_remote_url(_UpperCAmelCase ):
lowercase__ = pretrained_model_name_or_path
else:
lowercase__ = hf_bucket_url(_UpperCAmelCase, filename=_UpperCAmelCase, use_cdn=_UpperCAmelCase )
try:
# Load from URL or cache if already cached
lowercase__ = cached_path(
_UpperCAmelCase, cache_dir=_UpperCAmelCase, force_download=_UpperCAmelCase, proxies=_UpperCAmelCase, resume_download=_UpperCAmelCase, local_files_only=_UpperCAmelCase, )
# Load config dict
if resolved_config_file is None:
raise EnvironmentError
lowercase__ = Config.load_yaml(_UpperCAmelCase )
except EnvironmentError:
lowercase__ = "Can't load config for"
raise EnvironmentError(_UpperCAmelCase )
if resolved_config_file == config_file:
print("loading configuration file from path" )
else:
print("loading configuration file cache" )
return Config.load_yaml(_UpperCAmelCase ), kwargs
def __a ( A ):
'''simple docstring'''
lowercase__ = torch.load("dump.pt" , map_location=in_tensor.device )
lowercase__ = in_tensor.numpy()
lowercase__ = out_tensor.numpy()[0]
print(na.shape , na[0, 0, :5] )
print(na.shape , na[0, 0, :5] )
assert np.allclose(A , A , rtol=0.01 , atol=0.1 ), (
f'''{sum([1 for x in np.isclose(A , A , rtol=0.01 , atol=0.1 ).flatten() if x is False] )/len(na.flatten() )*1_00:.4f} %'''
" element-wise mismatch"
)
raise Exception("tensors are all good" )
# Hugging face functions below
def __a ( A ):
'''simple docstring'''
lowercase__ = urlparse(A )
return parsed.scheme in ("http", "https")
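# Legacy hub URL layout: flat ids map to "{endpoint}/{model_id}-{filename}",
# namespaced ids ("org/name") map to "{endpoint}/{model_id}/{filename}".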
def __a ( A , A , A=True ):
'''simple docstring'''
lowercase__ = CLOUDFRONT_DISTRIB_PREFIX if use_cdn else S3_BUCKET_PREFIX
lowercase__ = "/" not in model_id
if legacy_format:
return f'''{endpoint}/{model_id}-{filename}'''
else:
return f'''{endpoint}/{model_id}/{filename}'''
def __a ( A , A , A=None , A=0 , A=None , ):
'''simple docstring'''
lowercase__ = "python/{}".format(sys.version.split()[0] )
if _torch_available:
ua += "; torch/{}".format(torch.__version__ )
if isinstance(A , A ):
ua += "; " + "; ".join("{}/{}".format(A , A ) for k, v in user_agent.items() )
elif isinstance(A , A ):
ua += "; " + user_agent
lowercase__ = {"user-agent": ua}
if resume_size > 0:
lowercase__ = "bytes=%d-" % (resume_size,)
lowercase__ = requests.get(A , stream=A , proxies=A , headers=A )
if response.status_code == 4_16: # Range not satisfiable
return
lowercase__ = response.headers.get("Content-Length" )
lowercase__ = resume_size + int(A ) if content_length is not None else None
lowercase__ = tqdm(
unit="B" , unit_scale=A , total=A , initial=A , desc="Downloading" , )
for chunk in response.iter_content(chunk_size=10_24 ):
if chunk: # filter out keep-alive new chunks
progress.update(len(A ) )
temp_file.write(A )
progress.close()
def __a ( url , cache_dir=None , force_download=False , proxies=None , etag_timeout=10 , resume_download=False , user_agent=None , local_files_only=False , ):
    '''simple docstring'''
    if cache_dir is None:
        lowercase__ = TRANSFORMERS_CACHE
    if isinstance(cache_dir , Path ):
        lowercase__ = str(cache_dir )
    os.makedirs(cache_dir , exist_ok=True )
    lowercase__ = None
    if not local_files_only:
        try:
            lowercase__ = requests.head(url , allow_redirects=True , proxies=proxies , timeout=etag_timeout )
            if response.status_code == 2_00:
                lowercase__ = response.headers.get("ETag" )
        except (EnvironmentError, requests.exceptions.Timeout):
            # etag is already None
            pass
    lowercase__ = url_to_filename(url , etag )
    # get cache path to put the file
    lowercase__ = os.path.join(cache_dir , filename )
    # etag is None = we don't have a connection, or url doesn't exist, or is otherwise inaccessible.
    # try to get the last downloaded one
    if etag is None:
        if os.path.exists(cache_path ):
            return cache_path
        else:
            lowercase__ = [
                file
                for file in fnmatch.filter(os.listdir(cache_dir ) , filename + ".*" )
                if not file.endswith(".json" ) and not file.endswith(".lock" )
            ]
            if len(matching_files ) > 0:
                return os.path.join(cache_dir , matching_files[-1] )
            else:
                # If files cannot be found and local_files_only=True,
                # the models might've been found if local_files_only=False
                # Notify the user about that
                if local_files_only:
                    raise ValueError(
                        "Cannot find the requested files in the cached path and outgoing traffic has been"
                        " disabled. To enable model look-ups and downloads online, set 'local_files_only'"
                        " to False." )
                return None
    # From now on, etag is not None.
    if os.path.exists(cache_path ) and not force_download:
        return cache_path
    # Prevent parallel downloads of the same file with a lock.
    lowercase__ = cache_path + ".lock"
    with FileLock(lock_path ):
        # If the download just completed while the lock was activated.
        if os.path.exists(cache_path ) and not force_download:
            # Even if returning early like here, the lock will be released.
            return cache_path
        if resume_download:
            lowercase__ = cache_path + ".incomplete"
            @contextmanager
            def _resumable_file_manager():
                with open(incomplete_path , "a+b" ) as f:
                    yield f
            lowercase__ = _resumable_file_manager
            if os.path.exists(incomplete_path ):
                lowercase__ = os.stat(incomplete_path ).st_size
            else:
                lowercase__ = 0
        else:
            lowercase__ = partial(tempfile.NamedTemporaryFile , dir=cache_dir , delete=False )
            lowercase__ = 0
        # Download to temporary file, then copy to cache dir once finished.
        # Otherwise you get corrupt cache entries if the download gets interrupted.
        with temp_file_manager() as temp_file:
            print(
                "{} not found in cache or force_download set to True, downloading to {}".format(url , temp_file.name ) )
            http_get(
                url , temp_file , proxies=proxies , resume_size=resume_size , user_agent=user_agent , )
        os.replace(temp_file.name , cache_path )
        lowercase__ = {"url": url, "etag": etag}
        lowercase__ = cache_path + ".json"
        with open(meta_path , "w" ) as meta_file:
            json.dump(meta , meta_file )
    return cache_path
def __a ( url , etag=None ):
    '''simple docstring'''
    lowercase__ = url.encode("utf-8" )
    lowercase__ = shaaaa(url_bytes )
    lowercase__ = url_hash.hexdigest()
    if etag:
        lowercase__ = etag.encode("utf-8" )
        lowercase__ = shaaaa(etag_bytes )
        filename += "." + etag_hash.hexdigest()
if url.endswith(".h5" ):
filename += ".h5"
return filename
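# Sketch of the resulting cache layout, assuming shaaaa is an alias of hashlib.sha256 imported
# above: the filename is "<sha256(url)>.<sha256(etag)>", and a trailing ".h5" is preserved so
# downstream code can tell TensorFlow weight files apart.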
def __a ( url_or_filename , cache_dir=None , force_download=False , proxies=None , resume_download=False , user_agent=None , extract_compressed_file=False , force_extract=False , local_files_only=False , ):
    '''simple docstring'''
    if cache_dir is None:
        lowercase__ = TRANSFORMERS_CACHE
    if isinstance(url_or_filename , Path ):
        lowercase__ = str(url_or_filename )
    if isinstance(cache_dir , Path ):
        lowercase__ = str(cache_dir )
    if is_remote_url(url_or_filename ):
        # URL, so get it from the cache (downloading if necessary)
        lowercase__ = get_from_cache(
            url_or_filename , cache_dir=cache_dir , force_download=force_download , proxies=proxies , resume_download=resume_download , user_agent=user_agent , local_files_only=local_files_only , )
    elif os.path.exists(url_or_filename ):
        # File, and it exists.
        lowercase__ = url_or_filename
    elif urlparse(url_or_filename ).scheme == "":
        # File, but it doesn't exist.
        raise EnvironmentError("file {} not found".format(url_or_filename ) )
    else:
        # Something unknown
        raise ValueError("unable to parse {} as a URL or as a local path".format(url_or_filename ) )
    if extract_compressed_file:
        if not is_zipfile(output_path ) and not tarfile.is_tarfile(output_path ):
            return output_path
        # Path where we extract compressed archives
        # We avoid '.' in dir name and add "-extracted" at the end: "./model.zip" => "./model-zip-extracted/"
        lowercase__ , lowercase__ = os.path.split(output_path )
        lowercase__ = output_file.replace("." , "-" ) + "-extracted"
        lowercase__ = os.path.join(output_dir , output_extracted_dir_name )
        if os.path.isdir(output_path_extracted ) and os.listdir(output_path_extracted ) and not force_extract:
            return output_path_extracted
        # Prevent parallel extractions
        lowercase__ = output_path + ".lock"
        with FileLock(lock_path ):
            shutil.rmtree(output_path_extracted , ignore_errors=True )
            os.makedirs(output_path_extracted )
            if is_zipfile(output_path ):
                with ZipFile(output_path , "r" ) as zip_file:
                    zip_file.extractall(output_path_extracted )
                    zip_file.close()
            elif tarfile.is_tarfile(output_path ):
                lowercase__ = tarfile.open(output_path )
                tar_file.extractall(output_path_extracted )
                tar_file.close()
            else:
                raise EnvironmentError("Archive format of {} could not be identified".format(output_path ) )
return output_path_extracted
return output_path
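# Usage sketch for the function above (cached_path in the original source; hypothetical URL):
# cached_path("https://example.com/archive.tar.gz", extract_compressed_file=True) downloads the
# archive once, extracts it next to the cache entry, and returns the "-extracted" directory.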
def __a ( query , delim="," ):
    '''simple docstring'''
    assert isinstance(query , str )
    if os.path.isfile(query ):
        with open(query ) as f:
            lowercase__ = eval(f.read() )
    else:
        lowercase__ = requests.get(query )
        try:
            lowercase__ = req.json()
        except Exception:
            lowercase__ = req.content.decode()
        assert data is not None, "could not connect"
        try:
            lowercase__ = eval(data )
except Exception:
lowercase__ = data.split("\n" )
req.close()
return data
def __a ( A ):
'''simple docstring'''
lowercase__ = requests.get(A )
lowercase__ = np.array(Image.open(BytesIO(response.content ) ) )
return img
def __a ( url ):
'''simple docstring'''
lowercase__ = url.split("/" )[-1]
if fn not in os.listdir(os.getcwd() ):
wget.download(A )
with open(A , "rb" ) as stream:
lowercase__ = pkl.load(A )
lowercase__ = weights.pop("model" )
lowercase__ = {}
for k, v in model.items():
        lowercase__ = torch.from_numpy(v )
if "running_var" in k:
lowercase__ = torch.tensor([0] )
lowercase__ = k.replace("running_var" , "num_batches_tracked" )
lowercase__ = zero
return new
def __a ( A ):
'''simple docstring'''
print(f'''{os.path.abspath(os.path.join(A , os.pardir ) )}/demo.ipynb''' )
def __a ( A , A="RGB" ):
'''simple docstring'''
assert isinstance(A , A )
if os.path.isfile(A ):
lowercase__ = cva.imread(A )
else:
lowercase__ = get_image_from_url(A )
assert img is not None, f'''could not connect to: {im}'''
lowercase__ = cva.cvtColor(A , cva.COLOR_BGR2RGB )
if input_format == "RGB":
lowercase__ = img[:, :, ::-1]
return img
def __a ( images , batch=1 ):
    '''simple docstring'''
    return (images[i : i + batch] for i in range(0 , len(images ) , batch ))
| 713 |
"""simple docstring"""
from __future__ import annotations
import math
def __a ( number ):
'''simple docstring'''
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
    # All primes greater than 3 are of the form 6k +/- 1
    for i in range(5 , int(math.sqrt(number ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
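# Why 6k +/- 1 suffices: any integer is 6k + r with r in {0, ..., 5}; 6k, 6k+2 and 6k+4 are even
# and 6k+3 is divisible by 3, so every prime > 3 is 6k - 1 or 6k + 1. For number = 97 the loop
# only tests the divisors 5 and 7 (i.e. up to sqrt(97) ~ 9.8).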
lowerCAmelCase_: Optional[Any] = [num for num in range(3, 1_0_0_0_0_1, 2) if not is_prime(num)]
def __a ( n ):
    '''simple docstring'''
    if not isinstance(n , int ):
        raise ValueError("n must be an integer" )
    if n <= 0:
        raise ValueError("n must be > 0" )
    lowercase__ = []
    for num in range(len(odd_composites ) ):
        lowercase__ = 0
        while 2 * i * i <= odd_composites[num]:
            lowercase__ = odd_composites[num] - 2 * i * i
            if is_prime(rem ):
                break
            i += 1
        else:
            list_nums.append(odd_composites[num] )
        if len(list_nums ) == n:
            return list_nums
return []
def __a ( ):
'''simple docstring'''
return compute_nums(1 )[0]
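# The helpers above search the precomputed odd composites for numbers that cannot be written as
# prime + 2*i*i (Goldbach's other conjecture); the last function returns the first counterexample.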
if __name__ == "__main__":
print(F'{solution() = }')
| 668 | 0 |
"""simple docstring"""
import hashlib
import unittest
from typing import Dict
import numpy as np
from transformers import (
MODEL_FOR_MASK_GENERATION_MAPPING,
TF_MODEL_FOR_MASK_GENERATION_MAPPING,
is_vision_available,
pipeline,
)
from transformers.pipelines import MaskGenerationPipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
if is_vision_available():
from PIL import Image
else:
class a__ :
@staticmethod
def snake_case__ ( *_UpperCAmelCase, **_UpperCAmelCase ):
'''simple docstring'''
pass
def __a ( image ):
    '''simple docstring'''
    lowercase__ = hashlib.md5(image.tobytes() )
return m.hexdigest()[:10]
def __a ( A ):
'''simple docstring'''
lowercase__ = np.array(A )
lowercase__ = npimg.shape
return {"hash": hashimage(A ), "shape": shape}
@is_pipeline_test
@require_vision
@require_torch
class a__ ( unittest.TestCase ):
snake_case_ = dict(
(list(MODEL_FOR_MASK_GENERATION_MAPPING.items() ) if MODEL_FOR_MASK_GENERATION_MAPPING else []) )
snake_case_ = dict(
(list(TF_MODEL_FOR_MASK_GENERATION_MAPPING.items() ) if TF_MODEL_FOR_MASK_GENERATION_MAPPING else []) )
def snake_case__ ( self, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase ):
'''simple docstring'''
lowercase__ = MaskGenerationPipeline(model=_UpperCAmelCase, image_processor=_UpperCAmelCase )
return image_segmenter, [
"./tests/fixtures/tests_samples/COCO/000000039769.png",
"./tests/fixtures/tests_samples/COCO/000000039769.png",
]
def snake_case__ ( self, _UpperCAmelCase, _UpperCAmelCase ):
'''simple docstring'''
pass
@require_tf
@unittest.skip("Image segmentation not implemented in TF" )
def snake_case__ ( self ):
'''simple docstring'''
pass
@slow
@require_torch
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = pipeline("mask-generation", model="facebook/sam-vit-huge" )
lowercase__ = image_segmenter("http://images.cocodataset.org/val2017/000000039769.jpg", points_per_batch=256 )
# Shortening by hashing
lowercase__ = []
for i, o in enumerate(outputs["masks"] ):
new_outupt += [{"mask": mask_to_test_readable(_UpperCAmelCase ), "scores": outputs["scores"][i]}]
# fmt: off
self.assertEqual(
            nested_simplify(new_output, decimals=4 ), [
{"mask": {"hash": "115ad19f5f", "shape": (480, 640)}, "scores": 1.0_444},
{"mask": {"hash": "6affa964c6", "shape": (480, 640)}, "scores": 1.021},
{"mask": {"hash": "dfe28a0388", "shape": (480, 640)}, "scores": 1.0_167},
{"mask": {"hash": "c0a5f4a318", "shape": (480, 640)}, "scores": 1.0_132},
{"mask": {"hash": "fe8065c197", "shape": (480, 640)}, "scores": 1.0_053},
{"mask": {"hash": "e2d0b7a0b7", "shape": (480, 640)}, "scores": 0.9_967},
{"mask": {"hash": "453c7844bd", "shape": (480, 640)}, "scores": 0.993},
{"mask": {"hash": "3d44f2926d", "shape": (480, 640)}, "scores": 0.9_909},
{"mask": {"hash": "64033ddc3f", "shape": (480, 640)}, "scores": 0.9_879},
{"mask": {"hash": "801064ff79", "shape": (480, 640)}, "scores": 0.9_834},
{"mask": {"hash": "6172f276ef", "shape": (480, 640)}, "scores": 0.9_716},
{"mask": {"hash": "b49e60e084", "shape": (480, 640)}, "scores": 0.9_612},
{"mask": {"hash": "a811e775fd", "shape": (480, 640)}, "scores": 0.9_599},
{"mask": {"hash": "a6a8ebcf4b", "shape": (480, 640)}, "scores": 0.9_552},
{"mask": {"hash": "9d8257e080", "shape": (480, 640)}, "scores": 0.9_532},
{"mask": {"hash": "32de6454a8", "shape": (480, 640)}, "scores": 0.9_516},
{"mask": {"hash": "af3d4af2c8", "shape": (480, 640)}, "scores": 0.9_499},
{"mask": {"hash": "3c6db475fb", "shape": (480, 640)}, "scores": 0.9_483},
{"mask": {"hash": "c290813fb9", "shape": (480, 640)}, "scores": 0.9_464},
{"mask": {"hash": "b6f0b8f606", "shape": (480, 640)}, "scores": 0.943},
{"mask": {"hash": "92ce16bfdf", "shape": (480, 640)}, "scores": 0.943},
{"mask": {"hash": "c749b25868", "shape": (480, 640)}, "scores": 0.9_408},
{"mask": {"hash": "efb6cab859", "shape": (480, 640)}, "scores": 0.9_335},
{"mask": {"hash": "1ff2eafb30", "shape": (480, 640)}, "scores": 0.9_326},
{"mask": {"hash": "788b798e24", "shape": (480, 640)}, "scores": 0.9_262},
{"mask": {"hash": "abea804f0e", "shape": (480, 640)}, "scores": 0.8_999},
{"mask": {"hash": "7b9e8ddb73", "shape": (480, 640)}, "scores": 0.8_986},
{"mask": {"hash": "cd24047c8a", "shape": (480, 640)}, "scores": 0.8_984},
{"mask": {"hash": "6943e6bcbd", "shape": (480, 640)}, "scores": 0.8_873},
{"mask": {"hash": "b5f47c9191", "shape": (480, 640)}, "scores": 0.8_871}
], )
# fmt: on
@require_torch
@slow
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = "facebook/sam-vit-huge"
lowercase__ = pipeline("mask-generation", model=_UpperCAmelCase )
lowercase__ = image_segmenter(
"http://images.cocodataset.org/val2017/000000039769.jpg", pred_iou_thresh=1, points_per_batch=256 )
# Shortening by hashing
lowercase__ = []
for i, o in enumerate(outputs["masks"] ):
new_outupt += [{"mask": mask_to_test_readable(_UpperCAmelCase ), "scores": outputs["scores"][i]}]
self.assertEqual(
            nested_simplify(new_output, decimals=4 ), [
{"mask": {"hash": "115ad19f5f", "shape": (480, 640)}, "scores": 1.0_444},
{"mask": {"hash": "6affa964c6", "shape": (480, 640)}, "scores": 1.0_210},
{"mask": {"hash": "dfe28a0388", "shape": (480, 640)}, "scores": 1.0_167},
{"mask": {"hash": "c0a5f4a318", "shape": (480, 640)}, "scores": 1.0_132},
{"mask": {"hash": "fe8065c197", "shape": (480, 640)}, "scores": 1.0_053},
], )
| 714 |
"""simple docstring"""
import os
import sys
lowerCAmelCase_: Any = os.path.join(os.path.dirname(__file__), "src")
sys.path.append(SRC_DIR)
from transformers import (
AutoConfig,
AutoModel,
AutoModelForCausalLM,
AutoModelForMaskedLM,
AutoModelForQuestionAnswering,
AutoModelForSequenceClassification,
AutoTokenizer,
add_start_docstrings,
)
lowerCAmelCase_: Union[str, Any] = [
"torch",
"numpy",
"tokenizers",
"filelock",
"requests",
"tqdm",
"regex",
"sentencepiece",
"sacremoses",
"importlib_metadata",
"huggingface_hub",
]
@add_start_docstrings(AutoConfig.__doc__ )
def __a ( *A , **A ):
'''simple docstring'''
return AutoConfig.from_pretrained(*A , **A )
@add_start_docstrings(AutoTokenizer.__doc__ )
def __a ( *A , **A ):
'''simple docstring'''
return AutoTokenizer.from_pretrained(*A , **A )
@add_start_docstrings(AutoModel.__doc__ )
def __a ( *A , **A ):
'''simple docstring'''
return AutoModel.from_pretrained(*A , **A )
@add_start_docstrings(AutoModelForCausalLM.__doc__ )
def __a ( *A , **A ):
'''simple docstring'''
return AutoModelForCausalLM.from_pretrained(*A , **A )
@add_start_docstrings(AutoModelForMaskedLM.__doc__ )
def __a ( *A , **A ):
'''simple docstring'''
return AutoModelForMaskedLM.from_pretrained(*A , **A )
@add_start_docstrings(AutoModelForSequenceClassification.__doc__ )
def __a ( *A , **A ):
'''simple docstring'''
return AutoModelForSequenceClassification.from_pretrained(*A , **A )
@add_start_docstrings(AutoModelForQuestionAnswering.__doc__ )
def __a ( *A , **A ):
'''simple docstring'''
return AutoModelForQuestionAnswering.from_pretrained(*A , **A )
| 668 | 0 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase_: List[Any] = logging.get_logger(__name__)
UpperCAmelCase_: Optional[Any] = {
"google/canine-s": "https://huggingface.co/google/canine-s/resolve/main/config.json",
# See all CANINE models at https://huggingface.co/models?filter=canine
}
class a__ ( _a ):
snake_case_ = "canine"
def __init__( self, _UpperCAmelCase=768, _UpperCAmelCase=12, _UpperCAmelCase=12, _UpperCAmelCase=3072, _UpperCAmelCase="gelu", _UpperCAmelCase=0.1, _UpperCAmelCase=0.1, _UpperCAmelCase=1_6384, _UpperCAmelCase=16, _UpperCAmelCase=0.02, _UpperCAmelCase=1E-12, _UpperCAmelCase=0, _UpperCAmelCase=0Xe000, _UpperCAmelCase=0Xe001, _UpperCAmelCase=4, _UpperCAmelCase=4, _UpperCAmelCase=8, _UpperCAmelCase=1_6384, _UpperCAmelCase=128, **_UpperCAmelCase, ):
'''simple docstring'''
super().__init__(pad_token_id=_UpperCAmelCase, bos_token_id=_UpperCAmelCase, eos_token_id=_UpperCAmelCase, **_UpperCAmelCase )
lowercase__ = max_position_embeddings
lowercase__ = hidden_size
lowercase__ = num_hidden_layers
lowercase__ = num_attention_heads
lowercase__ = intermediate_size
lowercase__ = hidden_act
lowercase__ = hidden_dropout_prob
lowercase__ = attention_probs_dropout_prob
lowercase__ = initializer_range
lowercase__ = type_vocab_size
lowercase__ = layer_norm_eps
# Character config:
lowercase__ = downsampling_rate
lowercase__ = upsampling_kernel_size
lowercase__ = num_hash_functions
lowercase__ = num_hash_buckets
lowercase__ = local_transformer_stride
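        # A plain instantiation of this class reproduces the google/canine-s defaults above;
        # e.g. passing num_hash_buckets=8192 would halve the character hash space (sketch, untested).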
| 715 |
"""simple docstring"""
import unittest
from transformers import AutoConfig, AutoTokenizer, BertConfig, TensorType, is_flax_available
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, require_flax, slow
if is_flax_available():
import jax
from transformers.models.auto.modeling_flax_auto import FlaxAutoModel
from transformers.models.bert.modeling_flax_bert import FlaxBertModel
from transformers.models.roberta.modeling_flax_roberta import FlaxRobertaModel
@require_flax
class a__ ( unittest.TestCase ):
@slow
def snake_case__ ( self ):
'''simple docstring'''
for model_name in ["bert-base-cased", "bert-large-uncased"]:
with self.subTest(_UpperCAmelCase ):
lowercase__ = AutoConfig.from_pretrained(_UpperCAmelCase )
self.assertIsNotNone(_UpperCAmelCase )
self.assertIsInstance(_UpperCAmelCase, _UpperCAmelCase )
lowercase__ = FlaxAutoModel.from_pretrained(_UpperCAmelCase )
self.assertIsNotNone(_UpperCAmelCase )
self.assertIsInstance(_UpperCAmelCase, _UpperCAmelCase )
@slow
def snake_case__ ( self ):
'''simple docstring'''
for model_name in ["roberta-base", "roberta-large"]:
with self.subTest(_UpperCAmelCase ):
lowercase__ = AutoConfig.from_pretrained(_UpperCAmelCase )
self.assertIsNotNone(_UpperCAmelCase )
self.assertIsInstance(_UpperCAmelCase, _UpperCAmelCase )
lowercase__ = FlaxAutoModel.from_pretrained(_UpperCAmelCase )
self.assertIsNotNone(_UpperCAmelCase )
self.assertIsInstance(_UpperCAmelCase, _UpperCAmelCase )
@slow
def snake_case__ ( self ):
'''simple docstring'''
for model_name in ["bert-base-cased", "bert-large-uncased"]:
lowercase__ = AutoTokenizer.from_pretrained(_UpperCAmelCase )
lowercase__ = FlaxBertModel.from_pretrained(_UpperCAmelCase )
lowercase__ = tokenizer("Do you support jax jitted function?", return_tensors=TensorType.JAX )
@jax.jit
def eval(**_UpperCAmelCase ):
return model(**_UpperCAmelCase )
eval(**_UpperCAmelCase ).block_until_ready()
@slow
def snake_case__ ( self ):
'''simple docstring'''
for model_name in ["roberta-base", "roberta-large"]:
lowercase__ = AutoTokenizer.from_pretrained(_UpperCAmelCase )
lowercase__ = FlaxRobertaModel.from_pretrained(_UpperCAmelCase )
lowercase__ = tokenizer("Do you support jax jitted function?", return_tensors=TensorType.JAX )
@jax.jit
def eval(**_UpperCAmelCase ):
return model(**_UpperCAmelCase )
eval(**_UpperCAmelCase ).block_until_ready()
def snake_case__ ( self ):
'''simple docstring'''
with self.assertRaisesRegex(
_UpperCAmelCase, "bert-base is not a local folder and is not a valid model identifier" ):
lowercase__ = FlaxAutoModel.from_pretrained("bert-base" )
def snake_case__ ( self ):
'''simple docstring'''
with self.assertRaisesRegex(
_UpperCAmelCase, R"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)" ):
lowercase__ = FlaxAutoModel.from_pretrained(_UpperCAmelCase, revision="aaaaaa" )
def snake_case__ ( self ):
'''simple docstring'''
with self.assertRaisesRegex(
_UpperCAmelCase, "hf-internal-testing/config-no-model does not appear to have a file named flax_model.msgpack", ):
lowercase__ = FlaxAutoModel.from_pretrained("hf-internal-testing/config-no-model" )
def snake_case__ ( self ):
'''simple docstring'''
with self.assertRaisesRegex(_UpperCAmelCase, "Use `from_pt=True` to load this model" ):
lowercase__ = FlaxAutoModel.from_pretrained("hf-internal-testing/tiny-bert-pt-only" )
| 668 | 0 |
"""simple docstring"""
import math
import os
from copy import deepcopy
import datasets
import evaluate
import torch
import transformers
from datasets import load_dataset
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer
from accelerate import Accelerator
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import is_tpu_available, set_seed
lowerCAmelCase_: List[str] = "true"
def __a ( accelerator , num_samples=82 , batch_size=16 ):
    '''simple docstring'''
    set_seed(42 )
    lowercase__ = RegressionModel()
    lowercase__ = deepcopy(model )
    lowercase__ = RegressionDataset(length=num_samples )
    lowercase__ = DataLoader(dset , batch_size=batch_size )
    model.to(accelerator.device )
    lowercase__ , lowercase__ = accelerator.prepare(ddp_model , dataloader )
return model, ddp_model, dataloader
def __a ( accelerator , use_longest=False ):
    '''simple docstring'''
    lowercase__ = AutoTokenizer.from_pretrained("hf-internal-testing/mrpc-bert-base-cased" )
    lowercase__ = load_dataset("glue" , "mrpc" , split="validation" )
    def tokenize_function(examples ):
        lowercase__ = tokenizer(examples["sentence1"] , examples["sentence2"] , truncation=True , max_length=None )
        return outputs
    with accelerator.main_process_first():
        lowercase__ = dataset.map(
            tokenize_function , batched=True , remove_columns=["idx", "sentence1", "sentence2"] , )
    lowercase__ = tokenized_datasets.rename_column("label" , "labels" )
    def collate_fn(examples ):
        if use_longest:
            return tokenizer.pad(examples , padding="longest" , return_tensors="pt" )
        return tokenizer.pad(examples , padding="max_length" , max_length=1_28 , return_tensors="pt" )
    return DataLoader(tokenized_datasets , shuffle=False , collate_fn=collate_fn , batch_size=16 )
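# Design note: "longest" pads each batch to its own longest sample (dynamic shapes), while
# "max_length" pads every batch to 128 tokens (static shapes), which matters when batches are
# split or dispatched across processes.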
def __a ( dispatch_batches , split_batches ):
    '''simple docstring'''
    lowercase__ = Accelerator(dispatch_batches=dispatch_batches , split_batches=split_batches )
    lowercase__ = get_dataloader(accelerator , not dispatch_batches )
    lowercase__ = AutoModelForSequenceClassification.from_pretrained(
        "hf-internal-testing/mrpc-bert-base-cased" , return_dict=True )
    lowercase__ , lowercase__ = accelerator.prepare(model , dataloader )
    return {"ddp": [ddp_model, ddp_dataloader, "cuda:0"], "no": [model, dataloader, accelerator.device]}, accelerator
def __a ( model , dataloader , accelerator ):
    '''simple docstring'''
    lowercase__ = []
    for batch in dataloader:
        lowercase__ , lowercase__ = batch.values()
        with torch.no_grad():
            lowercase__ = model(input )
        lowercase__ , lowercase__ = accelerator.gather_for_metrics((logit, target) )
        logits_and_targets.append((logit, target) )
    lowercase__ , lowercase__ = [], []
    for logit, targ in logits_and_targets:
        logits.append(logit )
        targs.append(targ )
    lowercase__ , lowercase__ = torch.cat(logits ), torch.cat(targs )
return logits, targs
def __a ( accelerator , num_samples=82 , dispatch_batches=False , split_batches=False , batch_size=16 ):
    '''simple docstring'''
    lowercase__ , lowercase__ , lowercase__ = get_basic_setup(accelerator , num_samples , batch_size )
    lowercase__ , lowercase__ = generate_predictions(ddp_model , dataloader , accelerator )
    assert (
        len(logits ) == num_samples
    ), f'''Unexpected number of inputs:\n    Expected: {num_samples}\n    Actual: {len(logits )}'''
def __a ( dispatch_batches = False , split_batches = False ):
    '''simple docstring'''
    lowercase__ = evaluate.load("glue" , "mrpc" )
    lowercase__ , lowercase__ = get_mrpc_setup(dispatch_batches , split_batches )
    # First do baseline
    lowercase__ , lowercase__ , lowercase__ = setup["no"]
    model.to(device )
    model.eval()
    for batch in dataloader:
        batch.to(device )
        with torch.inference_mode():
            lowercase__ = model(**batch )
        lowercase__ = outputs.logits.argmax(dim=-1 )
        metric.add_batch(predictions=preds , references=batch["labels"] )
    lowercase__ = metric.compute()
    # Then do distributed
    lowercase__ , lowercase__ , lowercase__ = setup["ddp"]
    model.eval()
    for batch in dataloader:
        with torch.inference_mode():
            lowercase__ = model(**batch )
        lowercase__ = outputs.logits.argmax(dim=-1 )
        lowercase__ = batch["labels"]
        lowercase__ , lowercase__ = accelerator.gather_for_metrics((preds, references) )
        metric.add_batch(predictions=preds , references=references )
lowercase__ = metric.compute()
for key in "accuracy f1".split():
assert math.isclose(
baseline[key] , distributed[key] ), f'''Baseline and Distributed are not the same for key {key}:\n\tBaseline: {baseline[key]}\n\tDistributed: {distributed[key]}\n'''
def __a ( ):
'''simple docstring'''
    lowercase__ = Accelerator(split_batches=False , dispatch_batches=False )
if accelerator.is_local_main_process:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_warning()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
# These are a bit slower so they should only be ran on the GPU or TPU
if torch.cuda.is_available() or is_tpu_available():
if accelerator.is_local_main_process:
print("**Testing gather_for_metrics**" )
for split_batches in [True, False]:
for dispatch_batches in [True, False]:
if accelerator.is_local_main_process:
print(f'''With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`''' )
                test_mrpc(dispatch_batches , split_batches )
accelerator.state._reset_state()
if accelerator.is_local_main_process:
print("**Test torch metrics**" )
for split_batches in [True, False]:
for dispatch_batches in [True, False]:
            lowercase__ = Accelerator(split_batches=split_batches , dispatch_batches=dispatch_batches )
if accelerator.is_local_main_process:
print(f'''With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`, length=99''' )
            test_torch_metrics(accelerator , 99 )
accelerator.state._reset_state()
if accelerator.is_local_main_process:
print("**Test last batch is not dropped when perfectly divisible**" )
lowercase__ = Accelerator()
    test_torch_metrics(accelerator , 5_12 )
accelerator.state._reset_state()
def __a ( A ):
'''simple docstring'''
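    # TPU entry point (assumed): spawned workers pass a process index, which main() does not need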
main()
if __name__ == "__main__":
main()
| 716 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCAmelCase_: str = logging.get_logger(__name__)
lowerCAmelCase_: List[Any] = {
"facebook/data2vec-vision-base-ft": (
"https://huggingface.co/facebook/data2vec-vision-base-ft/resolve/main/config.json"
),
}
class a__ ( _a ):
snake_case_ = "data2vec-vision"
def __init__( self, _UpperCAmelCase=768, _UpperCAmelCase=12, _UpperCAmelCase=12, _UpperCAmelCase=3072, _UpperCAmelCase="gelu", _UpperCAmelCase=0.0, _UpperCAmelCase=0.0, _UpperCAmelCase=0.02, _UpperCAmelCase=1E-12, _UpperCAmelCase=224, _UpperCAmelCase=16, _UpperCAmelCase=3, _UpperCAmelCase=False, _UpperCAmelCase=False, _UpperCAmelCase=False, _UpperCAmelCase=False, _UpperCAmelCase=0.1, _UpperCAmelCase=0.1, _UpperCAmelCase=True, _UpperCAmelCase=[3, 5, 7, 11], _UpperCAmelCase=[1, 2, 3, 6], _UpperCAmelCase=True, _UpperCAmelCase=0.4, _UpperCAmelCase=256, _UpperCAmelCase=1, _UpperCAmelCase=False, _UpperCAmelCase=255, **_UpperCAmelCase, ):
'''simple docstring'''
super().__init__(**_UpperCAmelCase )
lowercase__ = hidden_size
lowercase__ = num_hidden_layers
lowercase__ = num_attention_heads
lowercase__ = intermediate_size
lowercase__ = hidden_act
lowercase__ = hidden_dropout_prob
lowercase__ = attention_probs_dropout_prob
lowercase__ = initializer_range
lowercase__ = layer_norm_eps
lowercase__ = image_size
lowercase__ = patch_size
lowercase__ = num_channels
lowercase__ = use_mask_token
lowercase__ = use_absolute_position_embeddings
lowercase__ = use_relative_position_bias
lowercase__ = use_shared_relative_position_bias
lowercase__ = layer_scale_init_value
lowercase__ = drop_path_rate
lowercase__ = use_mean_pooling
# decode head attributes (semantic segmentation)
lowercase__ = out_indices
lowercase__ = pool_scales
# auxiliary head attributes (semantic segmentation)
lowercase__ = use_auxiliary_head
lowercase__ = auxiliary_loss_weight
lowercase__ = auxiliary_channels
lowercase__ = auxiliary_num_convs
lowercase__ = auxiliary_concat_input
lowercase__ = semantic_loss_ignore_index
class a__ ( _a ):
snake_case_ = version.parse("1.11" )
@property
def snake_case__ ( self ):
'''simple docstring'''
return OrderedDict(
[
("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
] )
@property
def snake_case__ ( self ):
'''simple docstring'''
return 1E-4
| 668 | 0 |
"""simple docstring"""
import argparse
import torch
# Step 1. clone https://github.com/microsoft/unilm
# Step 2. git checkout to https://github.com/microsoft/unilm/commit/b94ec76c36f02fb2b0bf0dcb0b8554a2185173cd
# Step 3. cd unilm
# Step 4. ln -s $(realpath wavlm/modules.py) ./ # create simlink
# import classes
from unilm.wavlm.WavLM import WavLM as WavLMOrig
from unilm.wavlm.WavLM import WavLMConfig as WavLMConfigOrig
from transformers import WavLMConfig, WavLMModel, logging
logging.set_verbosity_info()
lowerCAmelCase_: int = logging.get_logger(__name__)
lowerCAmelCase_: List[str] = {
"post_extract_proj": "feature_projection.projection",
"encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
"self_attn.k_proj": "encoder.layers.*.attention.k_proj",
"self_attn.v_proj": "encoder.layers.*.attention.v_proj",
"self_attn.q_proj": "encoder.layers.*.attention.q_proj",
"self_attn.out_proj": "encoder.layers.*.attention.out_proj",
"self_attn.grep_linear": "encoder.layers.*.attention.gru_rel_pos_linear",
"self_attn.relative_attention_bias": "encoder.layers.*.attention.rel_attn_embed",
"self_attn.grep_a": "encoder.layers.*.attention.gru_rel_pos_const",
"self_attn_layer_norm": "encoder.layers.*.layer_norm",
"fc1": "encoder.layers.*.feed_forward.intermediate_dense",
"fc2": "encoder.layers.*.feed_forward.output_dense",
"final_layer_norm": "encoder.layers.*.final_layer_norm",
"encoder.layer_norm": "encoder.layer_norm",
"w2v_model.layer_norm": "feature_projection.layer_norm",
"quantizer.weight_proj": "quantizer.weight_proj",
"quantizer.vars": "quantizer.codevectors",
"project_q": "project_q",
"final_proj": "project_hid",
"w2v_encoder.proj": "ctc_proj",
"mask_emb": "masked_spec_embed",
}
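# In the mapping above, "*" is a placeholder for the encoder layer index; it is substituted from
# the fairseq parameter name while the state dict is walked below.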
lowerCAmelCase_: Tuple = [
"ctc_proj",
"quantizer.weight_proj",
"quantizer.codevectors",
"project_q",
"project_hid",
]
def __a ( hf_pointer , key , value , full_name , weight_type ):
    '''simple docstring'''
    for attribute in key.split("." ):
        lowercase__ = getattr(hf_pointer , attribute )
    if weight_type is not None:
        lowercase__ = getattr(hf_pointer , weight_type ).shape
else:
lowercase__ = hf_pointer.shape
assert hf_shape == value.shape, (
f'''Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be'''
f''' {value.shape} for {full_name}'''
)
if weight_type == "weight":
lowercase__ = value
elif weight_type == "weight_g":
lowercase__ = value
elif weight_type == "weight_v":
lowercase__ = value
elif weight_type == "bias":
lowercase__ = value
else:
lowercase__ = value
logger.info(f'''{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.''' )
def __a ( fairseq_model , hf_model ):
    '''simple docstring'''
    lowercase__ = []
    lowercase__ = fairseq_model.state_dict()
    lowercase__ = hf_model.feature_extractor
    for name, value in fairseq_dict.items():
        lowercase__ = False
        if "conv_layers" in name:
            load_conv_layer(
                name , value , feature_extractor , unused_weights , hf_model.config.feat_extract_norm == "group" , )
            lowercase__ = True
        else:
            for key, mapped_key in MAPPING.items():
                if key in name or key.split("w2v_model." )[-1] == name.split("." )[0]:
                    lowercase__ = True
                    if "*" in mapped_key:
                        lowercase__ = name.split(key )[0].split("." )[-2]
                        lowercase__ = mapped_key.replace("*" , layer_index )
                    if "weight_g" in name:
                        lowercase__ = "weight_g"
                    elif "weight_v" in name:
                        lowercase__ = "weight_v"
                    elif "bias" in name and "relative_attention_bias" not in name:
                        lowercase__ = "bias"
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        lowercase__ = "weight"
                    else:
                        lowercase__ = None
                    set_recursively(hf_model , mapped_key , value , name , weight_type )
                continue
        if not is_used:
            unused_weights.append(name )
    logger.warning(f'''Unused weights: {unused_weights}''' )
def __a ( full_name , value , feature_extractor , unused_weights , use_group_norm ):
'''simple docstring'''
lowercase__ = full_name.split("conv_layers." )[-1]
lowercase__ = name.split("." )
lowercase__ = int(items[0] )
lowercase__ = int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.'''
)
lowercase__ = value
logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.'''
)
lowercase__ = value
logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
f'''{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was'''
" found."
)
lowercase__ = value
logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.'''
)
lowercase__ = value
logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
else:
        unused_weights.append(full_name )
@torch.no_grad()
def __a ( checkpoint_path , pytorch_dump_folder_path , config_path=None ):
    '''simple docstring'''
    lowercase__ = torch.load(checkpoint_path )
    lowercase__ = WavLMConfigOrig(checkpoint["cfg"] )
    lowercase__ = WavLMOrig(cfg )
    model.load_state_dict(checkpoint["model"] )
    model.eval()
    if config_path is not None:
        lowercase__ = WavLMConfig.from_pretrained(config_path )
    else:
        lowercase__ = WavLMConfig()
    lowercase__ = WavLMModel(config )
    recursively_load_weights(model , hf_wavlm )
    hf_wavlm.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
lowerCAmelCase_: List[Any] = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
lowerCAmelCase_: Dict = parser.parse_args()
convert_wavlm_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
| 717 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase_: List[Any] = logging.get_logger(__name__)
lowerCAmelCase_: int = {
"microsoft/markuplm-base": "https://huggingface.co/microsoft/markuplm-base/resolve/main/config.json",
"microsoft/markuplm-large": "https://huggingface.co/microsoft/markuplm-large/resolve/main/config.json",
}
class a__ ( _a ):
snake_case_ = "markuplm"
def __init__( self, _UpperCAmelCase=3_0522, _UpperCAmelCase=768, _UpperCAmelCase=12, _UpperCAmelCase=12, _UpperCAmelCase=3072, _UpperCAmelCase="gelu", _UpperCAmelCase=0.1, _UpperCAmelCase=0.1, _UpperCAmelCase=512, _UpperCAmelCase=2, _UpperCAmelCase=0.02, _UpperCAmelCase=1E-12, _UpperCAmelCase=0, _UpperCAmelCase=0, _UpperCAmelCase=2, _UpperCAmelCase=256, _UpperCAmelCase=1024, _UpperCAmelCase=216, _UpperCAmelCase=1001, _UpperCAmelCase=32, _UpperCAmelCase=50, _UpperCAmelCase="absolute", _UpperCAmelCase=True, _UpperCAmelCase=None, **_UpperCAmelCase, ):
'''simple docstring'''
super().__init__(
pad_token_id=_UpperCAmelCase, bos_token_id=_UpperCAmelCase, eos_token_id=_UpperCAmelCase, **_UpperCAmelCase, )
lowercase__ = vocab_size
lowercase__ = hidden_size
lowercase__ = num_hidden_layers
lowercase__ = num_attention_heads
lowercase__ = hidden_act
lowercase__ = intermediate_size
lowercase__ = hidden_dropout_prob
lowercase__ = attention_probs_dropout_prob
lowercase__ = max_position_embeddings
lowercase__ = type_vocab_size
lowercase__ = initializer_range
lowercase__ = layer_norm_eps
lowercase__ = position_embedding_type
lowercase__ = use_cache
lowercase__ = classifier_dropout
# additional properties
lowercase__ = max_depth
lowercase__ = max_xpath_tag_unit_embeddings
lowercase__ = max_xpath_subs_unit_embeddings
lowercase__ = tag_pad_id
lowercase__ = subs_pad_id
lowercase__ = xpath_unit_hidden_size
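        # The max_depth and xpath_* fields above size the XPath tag/subscript embeddings that
        # MarkupLM adds on top of an otherwise BERT-style configuration.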
| 668 | 0 |
"""simple docstring"""
lowerCAmelCase_: Union[str, Any] = {
"Pillow": "Pillow",
"accelerate": "accelerate>=0.11.0",
"compel": "compel==0.1.8",
"black": "black~=23.1",
"datasets": "datasets",
"filelock": "filelock",
"flax": "flax>=0.4.1",
"hf-doc-builder": "hf-doc-builder>=0.3.0",
"huggingface-hub": "huggingface-hub>=0.13.2",
"requests-mock": "requests-mock==1.10.0",
"importlib_metadata": "importlib_metadata",
"invisible-watermark": "invisible-watermark",
"isort": "isort>=5.5.4",
"jax": "jax>=0.2.8,!=0.3.2",
"jaxlib": "jaxlib>=0.1.65",
"Jinja2": "Jinja2",
"k-diffusion": "k-diffusion>=0.0.12",
"torchsde": "torchsde",
"note_seq": "note_seq",
"librosa": "librosa",
"numpy": "numpy",
"omegaconf": "omegaconf",
"parameterized": "parameterized",
"protobuf": "protobuf>=3.20.3,<4",
"pytest": "pytest",
"pytest-timeout": "pytest-timeout",
"pytest-xdist": "pytest-xdist",
"ruff": "ruff>=0.0.241",
"safetensors": "safetensors",
"sentencepiece": "sentencepiece>=0.1.91,!=0.1.92",
"scipy": "scipy",
"onnx": "onnx",
"regex": "regex!=2019.12.17",
"requests": "requests",
"tensorboard": "tensorboard",
"torch": "torch>=1.4",
"torchvision": "torchvision",
"transformers": "transformers>=4.25.1",
"urllib3": "urllib3<=2.0.0",
}
| 718 |
"""simple docstring"""
lowerCAmelCase_: Union[str, Any] = [
9_9_9,
8_0_0,
7_9_9,
6_0_0,
5_9_9,
5_0_0,
4_0_0,
3_9_9,
3_7_7,
3_5_5,
3_3_3,
3_1_1,
2_8_8,
2_6_6,
2_4_4,
2_2_2,
2_0_0,
1_9_9,
1_7_7,
1_5_5,
1_3_3,
1_1_1,
8_8,
6_6,
4_4,
2_2,
0,
]
lowerCAmelCase_: List[str] = [
9_9_9,
9_7_6,
9_5_2,
9_2_8,
9_0_5,
8_8_2,
8_5_8,
8_5_7,
8_1_0,
7_6_2,
7_1_5,
7_1_4,
5_7_2,
4_2_9,
4_2_8,
2_8_6,
2_8_5,
2_3_8,
1_9_0,
1_4_3,
1_4_2,
1_1_8,
9_5,
7_1,
4_7,
2_4,
0,
]
lowerCAmelCase_: List[str] = [
9_9_9,
9_8_8,
9_7_7,
9_6_6,
9_5_5,
9_4_4,
9_3_3,
9_2_2,
9_1_1,
9_0_0,
8_9_9,
8_7_9,
8_5_9,
8_4_0,
8_2_0,
8_0_0,
7_9_9,
7_6_6,
7_3_3,
7_0_0,
6_9_9,
6_5_0,
6_0_0,
5_9_9,
5_0_0,
4_9_9,
4_0_0,
3_9_9,
3_5_0,
3_0_0,
2_9_9,
2_6_6,
2_3_3,
2_0_0,
1_9_9,
1_7_9,
1_5_9,
1_4_0,
1_2_0,
1_0_0,
9_9,
8_8,
7_7,
6_6,
5_5,
4_4,
3_3,
2_2,
1_1,
0,
]
lowerCAmelCase_: Dict = [
9_9_9,
9_9_5,
9_9_2,
9_8_9,
9_8_5,
9_8_1,
9_7_8,
9_7_5,
9_7_1,
9_6_7,
9_6_4,
9_6_1,
9_5_7,
9_5_6,
9_5_1,
9_4_7,
9_4_2,
9_3_7,
9_3_3,
9_2_8,
9_2_3,
9_1_9,
9_1_4,
9_1_3,
9_0_8,
9_0_3,
8_9_7,
8_9_2,
8_8_7,
8_8_1,
8_7_6,
8_7_1,
8_7_0,
8_6_4,
8_5_8,
8_5_2,
8_4_6,
8_4_0,
8_3_4,
8_2_8,
8_2_7,
8_2_0,
8_1_3,
8_0_6,
7_9_9,
7_9_2,
7_8_5,
7_8_4,
7_7_7,
7_7_0,
7_6_3,
7_5_6,
7_4_9,
7_4_2,
7_4_1,
7_3_3,
7_2_4,
7_1_6,
7_0_7,
6_9_9,
6_9_8,
6_8_8,
6_7_7,
6_6_6,
6_5_6,
6_5_5,
6_4_5,
6_3_4,
6_2_3,
6_1_3,
6_1_2,
5_9_8,
5_8_4,
5_7_0,
5_6_9,
5_5_5,
5_4_1,
5_2_7,
5_2_6,
5_0_5,
4_8_4,
4_8_3,
4_6_2,
4_4_0,
4_3_9,
3_9_6,
3_9_5,
3_5_2,
3_5_1,
3_0_8,
3_0_7,
2_6_4,
2_6_3,
2_2_0,
2_1_9,
1_7_6,
1_3_2,
8_8,
4_4,
0,
]
lowerCAmelCase_: Optional[int] = [
9_9_9,
9_9_7,
9_9_5,
9_9_2,
9_9_0,
9_8_8,
9_8_6,
9_8_4,
9_8_1,
9_7_9,
9_7_7,
9_7_5,
9_7_2,
9_7_0,
9_6_8,
9_6_6,
9_6_4,
9_6_1,
9_5_9,
9_5_7,
9_5_6,
9_5_4,
9_5_1,
9_4_9,
9_4_6,
9_4_4,
9_4_1,
9_3_9,
9_3_6,
9_3_4,
9_3_1,
9_2_9,
9_2_6,
9_2_4,
9_2_1,
9_1_9,
9_1_6,
9_1_4,
9_1_3,
9_1_0,
9_0_7,
9_0_5,
9_0_2,
8_9_9,
8_9_6,
8_9_3,
8_9_1,
8_8_8,
8_8_5,
8_8_2,
8_7_9,
8_7_7,
8_7_4,
8_7_1,
8_7_0,
8_6_7,
8_6_4,
8_6_1,
8_5_8,
8_5_5,
8_5_2,
8_4_9,
8_4_6,
8_4_3,
8_4_0,
8_3_7,
8_3_4,
8_3_1,
8_2_8,
8_2_7,
8_2_4,
8_2_1,
8_1_7,
8_1_4,
8_1_1,
8_0_8,
8_0_4,
8_0_1,
7_9_8,
7_9_5,
7_9_1,
7_8_8,
7_8_5,
7_8_4,
7_8_0,
7_7_7,
7_7_4,
7_7_0,
7_6_6,
7_6_3,
7_6_0,
7_5_6,
7_5_2,
7_4_9,
7_4_6,
7_4_2,
7_4_1,
7_3_7,
7_3_3,
7_3_0,
7_2_6,
7_2_2,
7_1_8,
7_1_4,
7_1_0,
7_0_7,
7_0_3,
6_9_9,
6_9_8,
6_9_4,
6_9_0,
6_8_5,
6_8_1,
6_7_7,
6_7_3,
6_6_9,
6_6_4,
6_6_0,
6_5_6,
6_5_5,
6_5_0,
6_4_6,
6_4_1,
6_3_6,
6_3_2,
6_2_7,
6_2_2,
6_1_8,
6_1_3,
6_1_2,
6_0_7,
6_0_2,
5_9_6,
5_9_1,
5_8_6,
5_8_0,
5_7_5,
5_7_0,
5_6_9,
5_6_3,
5_5_7,
5_5_1,
5_4_5,
5_3_9,
5_3_3,
5_2_7,
5_2_6,
5_1_9,
5_1_2,
5_0_5,
4_9_8,
4_9_1,
4_8_4,
4_8_3,
4_7_4,
4_6_6,
4_5_7,
4_4_9,
4_4_0,
4_3_9,
4_2_8,
4_1_8,
4_0_7,
3_9_6,
3_9_5,
3_8_1,
3_6_6,
3_5_2,
3_5_1,
3_3_0,
3_0_8,
3_0_7,
2_8_6,
2_6_4,
2_6_3,
2_4_2,
2_2_0,
2_1_9,
1_7_6,
1_7_5,
1_3_2,
1_3_1,
8_8,
4_4,
0,
]
lowerCAmelCase_: Tuple = [
9_9_9,
9_9_1,
9_8_2,
9_7_4,
9_6_6,
9_5_8,
9_5_0,
9_4_1,
9_3_3,
9_2_5,
9_1_6,
9_0_8,
9_0_0,
8_9_9,
8_7_4,
8_5_0,
8_2_5,
8_0_0,
7_9_9,
7_0_0,
6_0_0,
5_0_0,
4_0_0,
3_0_0,
2_0_0,
1_0_0,
0,
]
lowerCAmelCase_: str = [
9_9_9,
9_9_2,
9_8_5,
9_7_8,
9_7_1,
9_6_4,
9_5_7,
9_4_9,
9_4_2,
9_3_5,
9_2_8,
9_2_1,
9_1_4,
9_0_7,
9_0_0,
8_9_9,
8_7_9,
8_5_9,
8_4_0,
8_2_0,
8_0_0,
7_9_9,
7_6_6,
7_3_3,
7_0_0,
6_9_9,
6_5_0,
6_0_0,
5_9_9,
5_0_0,
4_9_9,
4_0_0,
3_9_9,
3_0_0,
2_9_9,
2_0_0,
1_9_9,
1_0_0,
9_9,
0,
]
lowerCAmelCase_: int = [
9_9_9,
9_9_6,
9_9_2,
9_8_9,
9_8_5,
9_8_2,
9_7_9,
9_7_5,
9_7_2,
9_6_8,
9_6_5,
9_6_1,
9_5_8,
9_5_5,
9_5_1,
9_4_8,
9_4_4,
9_4_1,
9_3_8,
9_3_4,
9_3_1,
9_2_7,
9_2_4,
9_2_0,
9_1_7,
9_1_4,
9_1_0,
9_0_7,
9_0_3,
9_0_0,
8_9_9,
8_9_1,
8_8_4,
8_7_6,
8_6_9,
8_6_1,
8_5_3,
8_4_6,
8_3_8,
8_3_0,
8_2_3,
8_1_5,
8_0_8,
8_0_0,
7_9_9,
7_8_8,
7_7_7,
7_6_6,
7_5_5,
7_4_4,
7_3_3,
7_2_2,
7_1_1,
7_0_0,
6_9_9,
6_8_8,
6_7_7,
6_6_6,
6_5_5,
6_4_4,
6_3_3,
6_2_2,
6_1_1,
6_0_0,
5_9_9,
5_8_5,
5_7_1,
5_5_7,
5_4_2,
5_2_8,
5_1_4,
5_0_0,
4_9_9,
4_8_5,
4_7_1,
4_5_7,
4_4_2,
4_2_8,
4_1_4,
4_0_0,
3_9_9,
3_7_9,
3_5_9,
3_4_0,
3_2_0,
3_0_0,
2_9_9,
2_7_9,
2_5_9,
2_4_0,
2_2_0,
2_0_0,
1_9_9,
1_6_6,
1_3_3,
1_0_0,
9_9,
6_6,
3_3,
0,
]
| 668 | 0 |
"""simple docstring"""
import tempfile
import torch
from diffusers import IPNDMScheduler
from .test_schedulers import SchedulerCommonTest
class a__ ( _a ):
snake_case_ = (IPNDMScheduler,)
snake_case_ = (("num_inference_steps", 50),)
def snake_case__ ( self, **_UpperCAmelCase ):
'''simple docstring'''
lowercase__ = {"num_train_timesteps": 1000}
config.update(**_UpperCAmelCase )
return config
def snake_case__ ( self, _UpperCAmelCase=0, **_UpperCAmelCase ):
'''simple docstring'''
lowercase__ = dict(self.forward_default_kwargs )
lowercase__ = kwargs.pop("num_inference_steps", _UpperCAmelCase )
lowercase__ = self.dummy_sample
lowercase__ = 0.1 * sample
lowercase__ = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
lowercase__ = self.get_scheduler_config(**_UpperCAmelCase )
lowercase__ = scheduler_class(**_UpperCAmelCase )
scheduler.set_timesteps(_UpperCAmelCase )
# copy over dummy past residuals
lowercase__ = dummy_past_residuals[:]
if time_step is None:
lowercase__ = scheduler.timesteps[len(scheduler.timesteps ) // 2]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(_UpperCAmelCase )
lowercase__ = scheduler_class.from_pretrained(_UpperCAmelCase )
new_scheduler.set_timesteps(_UpperCAmelCase )
# copy over dummy past residuals
lowercase__ = dummy_past_residuals[:]
lowercase__ = scheduler.step(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, **_UpperCAmelCase ).prev_sample
lowercase__ = new_scheduler.step(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, **_UpperCAmelCase ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
lowercase__ = scheduler.step(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, **_UpperCAmelCase ).prev_sample
lowercase__ = new_scheduler.step(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, **_UpperCAmelCase ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def snake_case__ ( self ):
'''simple docstring'''
pass
def snake_case__ ( self, _UpperCAmelCase=0, **_UpperCAmelCase ):
'''simple docstring'''
lowercase__ = dict(self.forward_default_kwargs )
lowercase__ = kwargs.pop("num_inference_steps", _UpperCAmelCase )
lowercase__ = self.dummy_sample
lowercase__ = 0.1 * sample
lowercase__ = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
lowercase__ = self.get_scheduler_config()
lowercase__ = scheduler_class(**_UpperCAmelCase )
scheduler.set_timesteps(_UpperCAmelCase )
# copy over dummy past residuals (must be after setting timesteps)
lowercase__ = dummy_past_residuals[:]
if time_step is None:
lowercase__ = scheduler.timesteps[len(scheduler.timesteps ) // 2]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(_UpperCAmelCase )
lowercase__ = scheduler_class.from_pretrained(_UpperCAmelCase )
# copy over dummy past residuals
new_scheduler.set_timesteps(_UpperCAmelCase )
# copy over dummy past residual (must be after setting timesteps)
lowercase__ = dummy_past_residuals[:]
lowercase__ = scheduler.step(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, **_UpperCAmelCase ).prev_sample
lowercase__ = new_scheduler.step(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, **_UpperCAmelCase ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
lowercase__ = scheduler.step(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, **_UpperCAmelCase ).prev_sample
lowercase__ = new_scheduler.step(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, **_UpperCAmelCase ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def snake_case__ ( self, **_UpperCAmelCase ):
'''simple docstring'''
lowercase__ = self.scheduler_classes[0]
lowercase__ = self.get_scheduler_config(**_UpperCAmelCase )
lowercase__ = scheduler_class(**_UpperCAmelCase )
lowercase__ = 10
lowercase__ = self.dummy_model()
lowercase__ = self.dummy_sample_deter
scheduler.set_timesteps(_UpperCAmelCase )
for i, t in enumerate(scheduler.timesteps ):
lowercase__ = model(_UpperCAmelCase, _UpperCAmelCase )
lowercase__ = scheduler.step(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase ).prev_sample
for i, t in enumerate(scheduler.timesteps ):
lowercase__ = model(_UpperCAmelCase, _UpperCAmelCase )
lowercase__ = scheduler.step(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase ).prev_sample
return sample
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = dict(self.forward_default_kwargs )
lowercase__ = kwargs.pop("num_inference_steps", _UpperCAmelCase )
for scheduler_class in self.scheduler_classes:
lowercase__ = self.get_scheduler_config()
lowercase__ = scheduler_class(**_UpperCAmelCase )
lowercase__ = self.dummy_sample
lowercase__ = 0.1 * sample
if num_inference_steps is not None and hasattr(_UpperCAmelCase, "set_timesteps" ):
scheduler.set_timesteps(_UpperCAmelCase )
elif num_inference_steps is not None and not hasattr(_UpperCAmelCase, "set_timesteps" ):
lowercase__ = num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
lowercase__ = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
lowercase__ = dummy_past_residuals[:]
lowercase__ = scheduler.timesteps[5]
lowercase__ = scheduler.timesteps[6]
lowercase__ = scheduler.step(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, **_UpperCAmelCase ).prev_sample
lowercase__ = scheduler.step(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, **_UpperCAmelCase ).prev_sample
self.assertEqual(output_a.shape, sample.shape )
self.assertEqual(output_a.shape, output_a.shape )
lowercase__ = scheduler.step(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, **_UpperCAmelCase ).prev_sample
lowercase__ = scheduler.step(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, **_UpperCAmelCase ).prev_sample
self.assertEqual(output_a.shape, sample.shape )
self.assertEqual(output_a.shape, output_a.shape )
def snake_case__ ( self ):
'''simple docstring'''
for timesteps in [100, 1000]:
self.check_over_configs(num_train_timesteps=_UpperCAmelCase, time_step=_UpperCAmelCase )
def snake_case__ ( self ):
'''simple docstring'''
for t, num_inference_steps in zip([1, 5, 10], [10, 50, 100] ):
self.check_over_forward(num_inference_steps=_UpperCAmelCase, time_step=_UpperCAmelCase )
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = self.full_loop()
lowercase__ = torch.mean(torch.abs(_UpperCAmelCase ) )
assert abs(result_mean.item() - 254_0529 ) < 10
| 719 |
"""simple docstring"""
from __future__ import annotations
def __a ( number_of_bytes , partitions ):
'''simple docstring'''
if partitions <= 0:
raise ValueError("partitions must be a positive number!" )
if partitions > number_of_bytes:
raise ValueError("partitions can not > number_of_bytes!" )
lowercase__ = number_of_bytes // partitions
lowercase__ = []
for i in range(A ):
lowercase__ = i * bytes_per_partition + 1
lowercase__ = (
number_of_bytes if i == partitions - 1 else (i + 1) * bytes_per_partition
)
allocation_list.append(f'''{start_bytes}-{end_bytes}''' )
return allocation_list
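# Example for the function above (allocation_num in the original source): with number_of_bytes=16
# and partitions=4 it returns ["1-4", "5-8", "9-12", "13-16"]; the final partition absorbs any
# remainder when number_of_bytes is not evenly divisible.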
if __name__ == "__main__":
import doctest
doctest.testmod()
| 668 | 0 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCAmelCase_: str = logging.get_logger(__name__)
lowerCAmelCase_: List[Any] = {
"facebook/data2vec-vision-base-ft": (
"https://huggingface.co/facebook/data2vec-vision-base-ft/resolve/main/config.json"
),
}
class a__ ( _a ):
snake_case_ = "data2vec-vision"
def __init__( self, _UpperCAmelCase=768, _UpperCAmelCase=12, _UpperCAmelCase=12, _UpperCAmelCase=3072, _UpperCAmelCase="gelu", _UpperCAmelCase=0.0, _UpperCAmelCase=0.0, _UpperCAmelCase=0.02, _UpperCAmelCase=1E-12, _UpperCAmelCase=224, _UpperCAmelCase=16, _UpperCAmelCase=3, _UpperCAmelCase=False, _UpperCAmelCase=False, _UpperCAmelCase=False, _UpperCAmelCase=False, _UpperCAmelCase=0.1, _UpperCAmelCase=0.1, _UpperCAmelCase=True, _UpperCAmelCase=[3, 5, 7, 11], _UpperCAmelCase=[1, 2, 3, 6], _UpperCAmelCase=True, _UpperCAmelCase=0.4, _UpperCAmelCase=256, _UpperCAmelCase=1, _UpperCAmelCase=False, _UpperCAmelCase=255, **_UpperCAmelCase, ):
'''simple docstring'''
super().__init__(**_UpperCAmelCase )
lowercase__ = hidden_size
lowercase__ = num_hidden_layers
lowercase__ = num_attention_heads
lowercase__ = intermediate_size
lowercase__ = hidden_act
lowercase__ = hidden_dropout_prob
lowercase__ = attention_probs_dropout_prob
lowercase__ = initializer_range
lowercase__ = layer_norm_eps
lowercase__ = image_size
lowercase__ = patch_size
lowercase__ = num_channels
lowercase__ = use_mask_token
lowercase__ = use_absolute_position_embeddings
lowercase__ = use_relative_position_bias
lowercase__ = use_shared_relative_position_bias
lowercase__ = layer_scale_init_value
lowercase__ = drop_path_rate
lowercase__ = use_mean_pooling
# decode head attributes (semantic segmentation)
lowercase__ = out_indices
lowercase__ = pool_scales
# auxiliary head attributes (semantic segmentation)
lowercase__ = use_auxiliary_head
lowercase__ = auxiliary_loss_weight
lowercase__ = auxiliary_channels
lowercase__ = auxiliary_num_convs
lowercase__ = auxiliary_concat_input
lowercase__ = semantic_loss_ignore_index
class a__ ( _a ):
snake_case_ = version.parse("1.11" )
@property
def snake_case__ ( self ):
'''simple docstring'''
return OrderedDict(
[
("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
] )
@property
def snake_case__ ( self ):
'''simple docstring'''
return 1E-4
| 720 |
"""simple docstring"""
from collections import deque
class a__ :
def __init__( self, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase ):
'''simple docstring'''
lowercase__ = process_name # process name
lowercase__ = arrival_time # arrival time of the process
# completion time of finished process or last interrupted time
lowercase__ = arrival_time
lowercase__ = burst_time # remaining burst time
lowercase__ = 0 # total time of the process wait in ready queue
lowercase__ = 0 # time from arrival time to completion time
class a__ :
def __init__( self, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, ):
'''simple docstring'''
lowercase__ = number_of_queues
# time slice of queues that round robin algorithm applied
lowercase__ = time_slices
# unfinished process is in this ready_queue
lowercase__ = queue
# current time
lowercase__ = current_time
# finished process is in this sequence queue
lowercase__ = deque()
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = []
for i in range(len(self.finish_queue ) ):
sequence.append(self.finish_queue[i].process_name )
return sequence
def snake_case__ ( self, _UpperCAmelCase ):
'''simple docstring'''
lowercase__ = []
for i in range(len(_UpperCAmelCase ) ):
waiting_times.append(queue[i].waiting_time )
return waiting_times
def snake_case__ ( self, _UpperCAmelCase ):
'''simple docstring'''
lowercase__ = []
for i in range(len(_UpperCAmelCase ) ):
turnaround_times.append(queue[i].turnaround_time )
return turnaround_times
def snake_case__ ( self, _UpperCAmelCase ):
'''simple docstring'''
lowercase__ = []
for i in range(len(_UpperCAmelCase ) ):
completion_times.append(queue[i].stop_time )
return completion_times
def snake_case__ ( self, _UpperCAmelCase ):
'''simple docstring'''
return [q.burst_time for q in queue]
def snake_case__ ( self, _UpperCAmelCase ):
'''simple docstring'''
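        # the gap between now and the moment the process last stopped running is pure waiting time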
process.waiting_time += self.current_time - process.stop_time
return process.waiting_time
def snake_case__ ( self, _UpperCAmelCase ):
'''simple docstring'''
lowercase__ = deque() # sequence deque of finished process
while len(_UpperCAmelCase ) != 0:
lowercase__ = ready_queue.popleft() # current process
# if process's arrival time is later than current time, update current time
if self.current_time < cp.arrival_time:
self.current_time += cp.arrival_time
# update waiting time of current process
self.update_waiting_time(_UpperCAmelCase )
# update current time
self.current_time += cp.burst_time
# finish the process and set the process's burst-time 0
lowercase__ = 0
# set the process's turnaround time because it is finished
lowercase__ = self.current_time - cp.arrival_time
# set the completion time
lowercase__ = self.current_time
# add the process to queue that has finished queue
finished.append(_UpperCAmelCase )
self.finish_queue.extend(_UpperCAmelCase ) # add finished process to finish queue
# FCFS will finish all remaining processes
return finished
def snake_case__ ( self, _UpperCAmelCase, _UpperCAmelCase ):
'''simple docstring'''
lowercase__ = deque() # sequence deque of terminated process
# just for 1 cycle and unfinished processes will go back to queue
for _ in range(len(_UpperCAmelCase ) ):
lowercase__ = ready_queue.popleft() # current process
# if process's arrival time is later than current time, update current time
if self.current_time < cp.arrival_time:
self.current_time += cp.arrival_time
# update waiting time of unfinished processes
self.update_waiting_time(_UpperCAmelCase )
# if the burst time of process is bigger than time-slice
if cp.burst_time > time_slice:
# use CPU for only time-slice
self.current_time += time_slice
# update remaining burst time
cp.burst_time -= time_slice
# update end point time
lowercase__ = self.current_time
# locate the process behind the queue because it is not finished
ready_queue.append(_UpperCAmelCase )
else:
# use CPU for remaining burst time
self.current_time += cp.burst_time
# set burst time 0 because the process is finished
lowercase__ = 0
# set the finish time
lowercase__ = self.current_time
# update the process' turnaround time because it is finished
lowercase__ = self.current_time - cp.arrival_time
# add the process to queue that has finished queue
finished.append(_UpperCAmelCase )
self.finish_queue.extend(_UpperCAmelCase ) # add finished process to finish queue
# return finished processes queue and remaining processes queue
return finished, ready_queue
def snake_case__ ( self ):
'''simple docstring'''
for i in range(self.number_of_queues - 1 ):
lowercase__ , lowercase__ = self.round_robin(
self.ready_queue, self.time_slices[i] )
# the last queue has first_come_first_served algorithm
self.first_come_first_served(self.ready_queue )
return self.finish_queue
if __name__ == "__main__":
import doctest
lowerCAmelCase_: Optional[int] = Process("P1", 0, 5_3)
lowerCAmelCase_: Union[str, Any] = Process("P2", 0, 1_7)
lowerCAmelCase_: str = Process("P3", 0, 6_8)
lowerCAmelCase_: int = Process("P4", 0, 2_4)
lowerCAmelCase_: Dict = 3
lowerCAmelCase_: Any = [1_7, 2_5]
lowerCAmelCase_: Tuple = deque([Pa, Pa, Pa, Pa])
if len(time_slices) != number_of_queues - 1:
raise SystemExit(0)
doctest.testmod(extraglobs={"queue": deque([Pa, Pa, Pa, Pa])})
lowerCAmelCase_: Any = Process("P1", 0, 5_3)
lowerCAmelCase_: Tuple = Process("P2", 0, 1_7)
lowerCAmelCase_: Optional[int] = Process("P3", 0, 6_8)
lowerCAmelCase_: List[Any] = Process("P4", 0, 2_4)
lowerCAmelCase_: Union[str, Any] = 3
lowerCAmelCase_: Any = [1_7, 2_5]
lowerCAmelCase_: Optional[Any] = deque([Pa, Pa, Pa, Pa])
lowerCAmelCase_: Union[str, Any] = MLFQ(number_of_queues, time_slices, queue, 0)
lowerCAmelCase_: Tuple = mlfq.multi_level_feedback_queue()
# print total waiting times of processes(P1, P2, P3, P4)
print(
F'waiting time:\
\t\t\t{MLFQ.calculate_waiting_time(mlfq, [Pa, Pa, Pa, Pa])}'
)
# print completion times of processes(P1, P2, P3, P4)
print(
F'completion time:\
\t\t{MLFQ.calculate_completion_time(mlfq, [Pa, Pa, Pa, Pa])}'
)
# print total turnaround times of processes(P1, P2, P3, P4)
print(
F'turnaround time:\
\t\t{MLFQ.calculate_turnaround_time(mlfq, [Pa, Pa, Pa, Pa])}'
)
# print sequence of finished processes
print(
F'sequence of finished processes:\
{mlfq.calculate_sequence_of_finish_queue()}'
)
| 668 | 0 |
"""simple docstring"""
from collections import deque
class a__ :
def __init__( self, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase ):
'''simple docstring'''
lowercase__ = process_name # process name
lowercase__ = arrival_time # arrival time of the process
# completion time of finished process or last interrupted time
lowercase__ = arrival_time
lowercase__ = burst_time # remaining burst time
lowercase__ = 0 # total time of the process wait in ready queue
lowercase__ = 0 # time from arrival time to completion time
class a__ :
def __init__( self, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, ):
'''simple docstring'''
lowercase__ = number_of_queues
# time slice of queues that round robin algorithm applied
lowercase__ = time_slices
# unfinished process is in this ready_queue
lowercase__ = queue
# current time
lowercase__ = current_time
# finished process is in this sequence queue
lowercase__ = deque()
    def calculate_sequence_of_finish_queue(self) -> list:
        """Return the process names in the order they finished."""
        sequence = []
        for i in range(len(self.finish_queue)):
            sequence.append(self.finish_queue[i].process_name)
        return sequence

    def calculate_waiting_time(self, queue: list) -> list:
        """Return the waiting time of every process in the given queue."""
        waiting_times = []
        for i in range(len(queue)):
            waiting_times.append(queue[i].waiting_time)
        return waiting_times

    def calculate_turnaround_time(self, queue: list) -> list:
        """Return the turnaround time of every process in the given queue."""
        turnaround_times = []
        for i in range(len(queue)):
            turnaround_times.append(queue[i].turnaround_time)
        return turnaround_times

    def calculate_completion_time(self, queue: list) -> list:
        """Return the completion (stop) time of every process in the given queue."""
        completion_times = []
        for i in range(len(queue)):
            completion_times.append(queue[i].stop_time)
        return completion_times

    def calculate_remaining_burst_time_of_processes(self, queue: deque) -> list:
        """Return the remaining burst time of every process in the given queue."""
        return [q.burst_time for q in queue]

    def update_waiting_time(self, process: Process) -> int:
        """Add the time a process just spent waiting in the ready queue."""
        process.waiting_time += self.current_time - process.stop_time
        return process.waiting_time
    def first_come_first_served(self, ready_queue: deque) -> deque:
        """FCFS algorithm: run every queued process to completion in order."""
        finished = deque()  # sequence deque of finished process
        while len(ready_queue) != 0:
            cp = ready_queue.popleft()  # current process
            # if process's arrival time is later than current time, update current time
            if self.current_time < cp.arrival_time:
                self.current_time = cp.arrival_time
            # update waiting time of current process
            self.update_waiting_time(cp)
            # update current time
            self.current_time += cp.burst_time
            # finish the process and set the process's burst-time 0
            cp.burst_time = 0
            # set the process's turnaround time because it is finished
            cp.turnaround_time = self.current_time - cp.arrival_time
            # set the completion time
            cp.stop_time = self.current_time
            # add the process to queue that has finished queue
            finished.append(cp)
        self.finish_queue.extend(finished)  # add finished process to finish queue
        # FCFS will finish all remaining processes
        return finished
    def round_robin(self, ready_queue: deque, time_slice: int) -> tuple:
        """Run one round robin cycle with the given time slice."""
        finished = deque()  # sequence deque of terminated process
        # just for 1 cycle and unfinished processes will go back to queue
        for _ in range(len(ready_queue)):
            cp = ready_queue.popleft()  # current process
            # if process's arrival time is later than current time, update current time
            if self.current_time < cp.arrival_time:
                self.current_time = cp.arrival_time
            # update waiting time of unfinished processes
            self.update_waiting_time(cp)
            # if the burst time of process is bigger than time-slice
            if cp.burst_time > time_slice:
                # use CPU for only time-slice
                self.current_time += time_slice
                # update remaining burst time
                cp.burst_time -= time_slice
                # update end point time
                cp.stop_time = self.current_time
                # locate the process behind the queue because it is not finished
                ready_queue.append(cp)
            else:
                # use CPU for remaining burst time
                self.current_time += cp.burst_time
                # set burst time 0 because the process is finished
                cp.burst_time = 0
                # set the finish time
                cp.stop_time = self.current_time
                # update the process' turnaround time because it is finished
                cp.turnaround_time = self.current_time - cp.arrival_time
                # add the process to queue that has finished queue
                finished.append(cp)
        self.finish_queue.extend(finished)  # add finished process to finish queue
        # return finished processes queue and remaining processes queue
        return finished, ready_queue
    def multi_level_feedback_queue(self) -> deque:
        """Run round robin on every queue except the last, then FCFS on the last."""
        # all queues except the last one use the round robin algorithm
        for i in range(self.number_of_queues - 1):
            finished, self.ready_queue = self.round_robin(self.ready_queue, self.time_slices[i])
        # the last queue has first_come_first_served algorithm
        self.first_come_first_served(self.ready_queue)
        return self.finish_queue
if __name__ == "__main__":
import doctest
lowerCAmelCase_: Optional[int] = Process("P1", 0, 5_3)
lowerCAmelCase_: Union[str, Any] = Process("P2", 0, 1_7)
lowerCAmelCase_: str = Process("P3", 0, 6_8)
lowerCAmelCase_: int = Process("P4", 0, 2_4)
lowerCAmelCase_: Dict = 3
lowerCAmelCase_: Any = [1_7, 2_5]
lowerCAmelCase_: Tuple = deque([Pa, Pa, Pa, Pa])
if len(time_slices) != number_of_queues - 1:
raise SystemExit(0)
doctest.testmod(extraglobs={"queue": deque([Pa, Pa, Pa, Pa])})
lowerCAmelCase_: Any = Process("P1", 0, 5_3)
lowerCAmelCase_: Tuple = Process("P2", 0, 1_7)
lowerCAmelCase_: Optional[int] = Process("P3", 0, 6_8)
lowerCAmelCase_: List[Any] = Process("P4", 0, 2_4)
lowerCAmelCase_: Union[str, Any] = 3
lowerCAmelCase_: Any = [1_7, 2_5]
lowerCAmelCase_: Optional[Any] = deque([Pa, Pa, Pa, Pa])
lowerCAmelCase_: Union[str, Any] = MLFQ(number_of_queues, time_slices, queue, 0)
lowerCAmelCase_: Tuple = mlfq.multi_level_feedback_queue()
# print total waiting times of processes(P1, P2, P3, P4)
print(
F'waiting time:\
\t\t\t{MLFQ.calculate_waiting_time(mlfq, [Pa, Pa, Pa, Pa])}'
)
# print completion times of processes(P1, P2, P3, P4)
print(
F'completion time:\
\t\t{MLFQ.calculate_completion_time(mlfq, [Pa, Pa, Pa, Pa])}'
)
# print total turnaround times of processes(P1, P2, P3, P4)
print(
F'turnaround time:\
\t\t{MLFQ.calculate_turnaround_time(mlfq, [Pa, Pa, Pa, Pa])}'
)
# print sequence of finished processes
print(
F'sequence of finished processes:\
{mlfq.calculate_sequence_of_finish_queue()}'
)
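# Hand trace of the demo above (all arrivals at t=0, time slices [17, 25]):
#   queue 0 (RR, slice 17): P1 -> t=17, P2 finishes at t=34, P3 -> t=51, P4 -> t=68
#   queue 1 (RR, slice 25): P1 -> t=93, P3 -> t=118, P4 finishes at t=125
#   queue 2 (FCFS):         P1 finishes at t=136, P3 finishes at t=162
# Expected output:
#   waiting times    [83, 17, 94, 101]
#   completion times [136, 34, 162, 125]
#   turnaround times [136, 34, 162, 125]
#   finish sequence  ['P2', 'P4', 'P1', 'P3']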
| 721 |
"""simple docstring"""
import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple
from transformers import AddedToken, BatchEncoding, ByT5Tokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
if is_torch_available():
    FRAMEWORK = "pt"
elif is_tf_available():
    FRAMEWORK = "tf"
else:
    FRAMEWORK = "jax"
class ByT5TokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = ByT5Tokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()
        tokenizer = ByT5Tokenizer()
        tokenizer.save_pretrained(self.tmpdirname)

    @cached_property
    def ta_base_tokenizer(self):
        return ByT5Tokenizer.from_pretrained("google/byt5-small")

    def get_tokenizer(self, **kwargs) -> ByT5Tokenizer:
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)
    def get_clean_sequence(self, tokenizer, with_prefix_space=False, max_length=20, min_length=5) -> Tuple[str, list]:
        # collect the ids that decode to clean ASCII-letter strings
        toks = []
        for i in range(len(tokenizer)):
            try:
                tok = tokenizer.decode([i], clean_up_tokenization_spaces=False)
            except UnicodeDecodeError:
                pass
            toks.append((i, tok))

        toks = list(filter(lambda t: re.match(r"^[ a-zA-Z]+$", t[1]), toks))
        toks = list(filter(lambda t: [t[0]] == tokenizer.encode(t[1], add_special_tokens=False), toks))
        if max_length is not None and len(toks) > max_length:
            toks = toks[:max_length]
        if min_length is not None and len(toks) < min_length and len(toks) > 0:
            while len(toks) < min_length:
                toks = toks + toks
        # toks_str = [t[1] for t in toks]
        toks_ids = [t[0] for t in toks]

        # Ensure consistency
        output_txt = tokenizer.decode(toks_ids, clean_up_tokenization_spaces=False)
        if " " not in output_txt and len(toks_ids) > 1:
            output_txt = (
                tokenizer.decode([toks_ids[0]], clean_up_tokenization_spaces=False)
                + " "
                + tokenizer.decode(toks_ids[1:], clean_up_tokenization_spaces=False)
            )
        if with_prefix_space:
            output_txt = " " + output_txt
        output_ids = tokenizer.encode(output_txt, add_special_tokens=False)
        return output_txt, output_ids
    def test_eos_treatment(self):
        tokenizer = self.ta_base_tokenizer
        batch_with_eos_added = tokenizer(["hi</s>", "I went to the gym</s>", "</s>"])
        batch_without_eos_added = tokenizer(["hi", "I went to the gym", ""])
        self.assertListEqual(batch_with_eos_added["input_ids"], batch_without_eos_added["input_ids"])
    def test_multibytes_char(self):
        tokenizer = self.ta_base_tokenizer
        src_text = "Unicode €."
        encoded = tokenizer(src_text)
        encoded_ids = [88, 113, 108, 102, 114, 103, 104, 35, 229, 133, 175, 49, 1]
        self.assertEqual(encoded["input_ids"], encoded_ids)
        # decoding
        decoded = tokenizer.decode(encoded_ids)
        self.assertEqual(decoded, "Unicode €.</s>")

        encoded = tokenizer("e è é ê ë")
        encoded_ids = [104, 35, 198, 171, 35, 198, 172, 35, 198, 173, 35, 198, 174, 1]
        self.assertEqual(encoded["input_ids"], encoded_ids)
        # decoding
        decoded = tokenizer.decode(encoded_ids)
        self.assertEqual(decoded, "e è é ê ë</s>")

        # encode/decode, but with `encode` instead of `__call__`
        self.assertEqual(tokenizer.decode(tokenizer.encode("e è é ê ë")), "e è é ê ë</s>")
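    # ByT5 maps each UTF-8 byte b to id b + 3; ids 0/1/2 are reserved for
    # pad/</s>/<unk>. Worked check against the ids asserted above:
    #   "U" = byte 85 -> 85 + 3 = 88   (first id of "Unicode €.")
    #   " " = byte 32 -> 32 + 3 = 35
    #   "€" = bytes 0xE2 0x82 0xAC = 226, 130, 172 -> 229, 133, 175
    #   "." = byte 46 -> 46 + 3 = 49, then 1 = </s> appended by the tokenizer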
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = self.ta_base_tokenizer
lowercase__ = ["A long paragraph for summarization.", "Another paragraph for summarization."]
# fmt: off
lowercase__ = [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 1, 0]
# fmt: on
lowercase__ = tokenizer(_UpperCAmelCase, padding=_UpperCAmelCase, return_tensors=_UpperCAmelCase )
self.assertIsInstance(_UpperCAmelCase, _UpperCAmelCase )
if FRAMEWORK != "jax":
lowercase__ = list(batch.input_ids.numpy()[0] )
else:
lowercase__ = list(batch.input_ids.tolist()[0] )
self.assertListEqual(_UpperCAmelCase, _UpperCAmelCase )
self.assertEqual((2, 37), batch.input_ids.shape )
self.assertEqual((2, 37), batch.attention_mask.shape )
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = self.ta_base_tokenizer
lowercase__ = ["A long paragraph for summarization.", "Another paragraph for summarization."]
lowercase__ = tokenizer(_UpperCAmelCase, padding=_UpperCAmelCase, return_tensors=_UpperCAmelCase )
# check if input_ids are returned and no decoder_input_ids
self.assertIn("input_ids", _UpperCAmelCase )
self.assertIn("attention_mask", _UpperCAmelCase )
self.assertNotIn("decoder_input_ids", _UpperCAmelCase )
self.assertNotIn("decoder_attention_mask", _UpperCAmelCase )
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = self.ta_base_tokenizer
lowercase__ = [
"Summary of the text.",
"Another summary.",
]
lowercase__ = tokenizer(
text_target=_UpperCAmelCase, max_length=32, padding="max_length", truncation=_UpperCAmelCase, return_tensors=_UpperCAmelCase )
self.assertEqual(32, targets["input_ids"].shape[1] )
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = self.ta_base_tokenizer
lowercase__ = ["A long paragraph for summarization. </s>"]
lowercase__ = ["Summary of the text. </s>"]
# fmt: off
lowercase__ = [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 35, 1]
lowercase__ = [86, 120, 112, 112, 100, 117, 124, 35, 114, 105, 35, 119, 107, 104, 35, 119, 104, 123, 119, 49, 35, 1]
# fmt: on
lowercase__ = tokenizer(_UpperCAmelCase, text_target=_UpperCAmelCase )
self.assertEqual(_UpperCAmelCase, batch["input_ids"][0] )
self.assertEqual(_UpperCAmelCase, batch["labels"][0] )
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
self.assertNotEqual(tokenizer.model_max_length, 42 )
# Now let's start the test
lowercase__ = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
# Isolate this from the other tests because we save additional tokens/etc
lowercase__ = tempfile.mkdtemp()
lowercase__ = " He is very happy, UNwant\u00E9d,running"
lowercase__ = tokenizer.encode(_UpperCAmelCase, add_special_tokens=_UpperCAmelCase )
tokenizer.save_pretrained(_UpperCAmelCase )
lowercase__ = tokenizer.__class__.from_pretrained(_UpperCAmelCase )
lowercase__ = after_tokenizer.encode(_UpperCAmelCase, add_special_tokens=_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase, _UpperCAmelCase )
shutil.rmtree(_UpperCAmelCase )
lowercase__ = self.get_tokenizers(model_max_length=42 )
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
# Isolate this from the other tests because we save additional tokens/etc
lowercase__ = tempfile.mkdtemp()
lowercase__ = " He is very happy, UNwant\u00E9d,running"
tokenizer.add_tokens(["bim", "bambam"] )
lowercase__ = tokenizer.additional_special_tokens
additional_special_tokens.append("new_additional_special_token" )
tokenizer.add_special_tokens({"additional_special_tokens": additional_special_tokens} )
lowercase__ = tokenizer.encode(_UpperCAmelCase, add_special_tokens=_UpperCAmelCase )
tokenizer.save_pretrained(_UpperCAmelCase )
lowercase__ = tokenizer.__class__.from_pretrained(_UpperCAmelCase )
lowercase__ = after_tokenizer.encode(_UpperCAmelCase, add_special_tokens=_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase, _UpperCAmelCase )
self.assertIn("new_additional_special_token", after_tokenizer.additional_special_tokens )
self.assertEqual(after_tokenizer.model_max_length, 42 )
lowercase__ = tokenizer.__class__.from_pretrained(_UpperCAmelCase, model_max_length=43 )
self.assertEqual(tokenizer.model_max_length, 43 )
shutil.rmtree(_UpperCAmelCase )
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(_UpperCAmelCase )
with open(os.path.join(_UpperCAmelCase, "special_tokens_map.json" ), encoding="utf-8" ) as json_file:
lowercase__ = json.load(_UpperCAmelCase )
with open(os.path.join(_UpperCAmelCase, "tokenizer_config.json" ), encoding="utf-8" ) as json_file:
lowercase__ = json.load(_UpperCAmelCase )
lowercase__ = [F'''<extra_id_{i}>''' for i in range(125 )]
lowercase__ = added_tokens_extra_ids + [
"an_additional_special_token"
]
lowercase__ = added_tokens_extra_ids + [
"an_additional_special_token"
]
with open(os.path.join(_UpperCAmelCase, "special_tokens_map.json" ), "w", encoding="utf-8" ) as outfile:
json.dump(_UpperCAmelCase, _UpperCAmelCase )
with open(os.path.join(_UpperCAmelCase, "tokenizer_config.json" ), "w", encoding="utf-8" ) as outfile:
json.dump(_UpperCAmelCase, _UpperCAmelCase )
# the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
# into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
# "special_tokens_map.json" files
lowercase__ = tokenizer_class.from_pretrained(
_UpperCAmelCase, )
self.assertIn(
"an_additional_special_token", tokenizer_without_change_in_init.additional_special_tokens )
# self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab
self.assertEqual(
["an_additional_special_token"], tokenizer_without_change_in_init.convert_ids_to_tokens(
tokenizer_without_change_in_init.convert_tokens_to_ids(["an_additional_special_token"] ) ), )
# Now we test that we can change the value of additional_special_tokens in the from_pretrained
lowercase__ = added_tokens_extra_ids + [AddedToken("a_new_additional_special_token", lstrip=_UpperCAmelCase )]
lowercase__ = tokenizer_class.from_pretrained(
_UpperCAmelCase, additional_special_tokens=_UpperCAmelCase, )
self.assertIn("a_new_additional_special_token", tokenizer.additional_special_tokens )
self.assertEqual(
["a_new_additional_special_token"], tokenizer.convert_ids_to_tokens(
tokenizer.convert_tokens_to_ids(["a_new_additional_special_token"] ) ), )
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(_UpperCAmelCase )
lowercase__ = tokenizer_class.from_pretrained(_UpperCAmelCase )
self.assertTrue(tokenizer.decode([255] ) == "" )
def snake_case__ ( self ):
'''simple docstring'''
pass
def snake_case__ ( self ):
'''simple docstring'''
pass
def snake_case__ ( self ):
'''simple docstring'''
pass
def snake_case__ ( self ):
'''simple docstring'''
pass
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = self.get_tokenizers(fast=_UpperCAmelCase, do_lower_case=_UpperCAmelCase )
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
lowercase__ = ["t", "h", "i", "s", " ", "i", "s", " ", "a", " ", "t", "e", "x", "t", "</s>"]
lowercase__ = tokenizer.convert_tokens_to_string(_UpperCAmelCase )
self.assertIsInstance(_UpperCAmelCase, _UpperCAmelCase )
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
lowercase__ = [
"bos_token",
"eos_token",
"unk_token",
"sep_token",
"pad_token",
"cls_token",
"mask_token",
]
lowercase__ = 0
lowercase__ = tokenizer.convert_ids_to_tokens(
_UpperCAmelCase, skip_special_tokens=_UpperCAmelCase )
for attr in attributes_list:
setattr(_UpperCAmelCase, attr + "_id", _UpperCAmelCase )
self.assertEqual(getattr(_UpperCAmelCase, _UpperCAmelCase ), _UpperCAmelCase )
self.assertEqual(getattr(_UpperCAmelCase, attr + "_id" ), _UpperCAmelCase )
setattr(_UpperCAmelCase, attr + "_id", _UpperCAmelCase )
self.assertEqual(getattr(_UpperCAmelCase, _UpperCAmelCase ), _UpperCAmelCase )
self.assertEqual(getattr(_UpperCAmelCase, attr + "_id" ), _UpperCAmelCase )
setattr(_UpperCAmelCase, "additional_special_tokens_ids", [] )
self.assertListEqual(getattr(_UpperCAmelCase, "additional_special_tokens" ), [] )
self.assertListEqual(getattr(_UpperCAmelCase, "additional_special_tokens_ids" ), [] )
setattr(_UpperCAmelCase, "additional_special_tokens_ids", [token_id_to_test_setters] )
self.assertListEqual(getattr(_UpperCAmelCase, "additional_special_tokens" ), [token_to_test_setters] )
self.assertListEqual(getattr(_UpperCAmelCase, "additional_special_tokens_ids" ), [token_id_to_test_setters] )
| 668 | 0 |
import copy
from dataclasses import dataclass
from pathlib import Path
from typing import Dict, Optional, Union
@dataclass
class _lowerCAmelCase :
'''simple docstring'''
a_ : Optional[Union[str, Path]] =None
a_ : bool =False
a_ : bool =False
a_ : bool =False
a_ : Optional[Dict] =None
a_ : Optional[str] =None
a_ : bool =False
a_ : bool =False
a_ : bool =False
a_ : bool =True
a_ : Optional[int] =None
a_ : int =1
a_ : Optional[Union[str, bool]] =None
a_ : bool =False
a_ : Optional[Dict] =None
a_ : Optional[str] =None
    def copy(self):
        """Return a deep copy of this config (the method name was lost in obfuscation; `copy` is assumed)."""
        return self.__class__(**{k: copy.deepcopy(v) for k, v in self.__dict__.items()})
| 669 |
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class _lowerCAmelCase ( ProcessorMixin ):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "CLIPImageProcessor"
    tokenizer_class = ("XLMRobertaTokenizer", "XLMRobertaTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)

    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            # attach the pixel values so a single BatchEncoding carries both modalities
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
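# --- usage sketch (comment-only; names are assumptions: the processor class above is
# left as `_lowerCAmelCase` in this dump, and the pretrained components are hypothetical) ---
# from transformers import CLIPImageProcessor, XLMRobertaTokenizerFast
#
# processor = _lowerCAmelCase(
#     image_processor=CLIPImageProcessor(),
#     tokenizer=XLMRobertaTokenizerFast.from_pretrained("xlm-roberta-base"),
# )
# inputs = processor(text=["a photo of a cat"], images=pil_image, return_tensors="pt")
# inputs keys: input_ids, attention_mask, and pixel_values (when images are passed)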
| 669 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

MARKUPLM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""microsoft/markuplm-base""": """https://huggingface.co/microsoft/markuplm-base/resolve/main/config.json""",
"""microsoft/markuplm-large""": """https://huggingface.co/microsoft/markuplm-large/resolve/main/config.json""",
}
class _lowerCAmelCase ( PretrainedConfig ):
    model_type = "markuplm"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        bos_token_id=0,
        eos_token_id=2,
        max_xpath_tag_unit_embeddings=256,
        max_xpath_subs_unit_embeddings=1024,
        tag_pad_id=216,
        subs_pad_id=1001,
        xpath_unit_hidden_size=32,
        max_depth=50,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            **kwargs,
        )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
        # additional properties
        self.max_depth = max_depth
        self.max_xpath_tag_unit_embeddings = max_xpath_tag_unit_embeddings
        self.max_xpath_subs_unit_embeddings = max_xpath_subs_unit_embeddings
        self.tag_pad_id = tag_pad_id
        self.subs_pad_id = subs_pad_id
        self.xpath_unit_hidden_size = xpath_unit_hidden_size
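# --- usage sketch (comment-only; assumes the config class above, still named
# `_lowerCAmelCase` in this dump, behaves like MarkupLM's PretrainedConfig subclass) ---
# config = _lowerCAmelCase(max_depth=50, xpath_unit_hidden_size=32)
# config.to_dict()["tag_pad_id"]  -> 216 (the xpath tag padding id used by the embeddings)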
| 669 |
import json
import os
from pathlib import Path
import pytest
from datasets.download.download_config import DownloadConfig
from datasets.download.download_manager import DownloadManager
from datasets.utils.file_utils import hash_url_to_filename
lowerCAmelCase_ = """http://www.mocksite.com/file1.txt"""
lowerCAmelCase_ = """\"text\": [\"foo\", \"foo\"]"""
lowerCAmelCase_ = """6d8ce9aa78a471c7477201efbeabd3bb01ac2e7d100a6dc024ba1608361f90a8"""
class _lowerCAmelCase :
'''simple docstring'''
a_ : int =200
a_ : List[str] ={"""Content-Length""": """100"""}
a_ : Tuple ={}
def UpperCamelCase_ ( self : Any , **UpperCamelCase : Any ):
'''simple docstring'''
return [bytes(UpperCamelCase , 'utf-8' )]
def lowerCamelCase_ ( *lowerCAmelCase: Tuple , **lowerCAmelCase: Tuple )-> str:
return MockResponse()
@pytest.mark.parametrize("urls_type", [str, list, dict])
def test_download_manager_download(urls_type, tmp_path, monkeypatch):
    import requests

    monkeypatch.setattr(requests, "request", mock_request)

    url = URL
    if issubclass(urls_type, str):
        urls = url
    elif issubclass(urls_type, list):
        urls = [url]
    elif issubclass(urls_type, dict):
        urls = {"train": url}
    dataset_name = "dummy"
    cache_subdir = "downloads"
    cache_dir_root = tmp_path
    download_config = DownloadConfig(
        cache_dir=os.path.join(cache_dir_root, cache_subdir),
        use_etag=False,
    )
    dl_manager = DownloadManager(dataset_name=dataset_name, download_config=download_config)
    downloaded_paths = dl_manager.download(urls)
    input_urls = urls
    for downloaded_paths in [downloaded_paths]:
        if isinstance(urls, str):
            downloaded_paths = [downloaded_paths]
            input_urls = [urls]
        elif isinstance(urls, dict):
            assert "train" in downloaded_paths.keys()
            downloaded_paths = downloaded_paths.values()
            input_urls = urls.values()
        assert downloaded_paths
        for downloaded_path, input_url in zip(downloaded_paths, input_urls):
            assert downloaded_path == dl_manager.downloaded_paths[input_url]
            downloaded_path = Path(downloaded_path)
            parts = downloaded_path.parts
            assert parts[-1] == HASH
            assert parts[-2] == cache_subdir
            assert downloaded_path.exists()
            content = downloaded_path.read_text()
            assert content == CONTENT
            metadata_downloaded_path = downloaded_path.with_suffix(".json")
            assert metadata_downloaded_path.exists()
            metadata_content = json.loads(metadata_downloaded_path.read_text())
            assert metadata_content == {"url": URL, "etag": None}
@pytest.mark.parametrize("paths_type", [str, list, dict])
def test_download_manager_extract(paths_type, xz_file, text_file):
    filename = str(xz_file)
    if issubclass(paths_type, str):
        paths = filename
    elif issubclass(paths_type, list):
        paths = [filename]
    elif issubclass(paths_type, dict):
        paths = {"train": filename}
    dataset_name = "dummy"
    cache_dir = xz_file.parent
    extracted_subdir = "extracted"
    download_config = DownloadConfig(
        cache_dir=cache_dir,
        use_etag=False,
    )
    dl_manager = DownloadManager(dataset_name=dataset_name, download_config=download_config)
    extracted_paths = dl_manager.extract(paths)
    input_paths = paths
    for extracted_paths in [extracted_paths]:
        if isinstance(paths, str):
            extracted_paths = [extracted_paths]
            input_paths = [paths]
        elif isinstance(paths, dict):
            assert "train" in extracted_paths.keys()
            extracted_paths = extracted_paths.values()
            input_paths = paths.values()
        assert extracted_paths
        for extracted_path, input_path in zip(extracted_paths, input_paths):
            assert extracted_path == dl_manager.extracted_paths[input_path]
            extracted_path = Path(extracted_path)
            parts = extracted_path.parts
            assert parts[-1] == hash_url_to_filename(input_path, etag=None)
            assert parts[-2] == extracted_subdir
            assert extracted_path.exists()
            extracted_file_content = extracted_path.read_text()
            expected_file_content = text_file.read_text()
            assert extracted_file_content == expected_file_content
def _test_jsonl(path, file):
    assert path.endswith(".jsonl")
    for num_items, line in enumerate(file, start=1):
        item = json.loads(line.decode("utf-8"))
        assert item.keys() == {"col_1", "col_2", "col_3"}
    assert num_items == 4
@pytest.mark.parametrize("archive_jsonl", ["tar_jsonl_path", "zip_jsonl_path"])
def test_iter_archive_path(archive_jsonl, request):
    archive_jsonl_path = request.getfixturevalue(archive_jsonl)
    dl_manager = DownloadManager()
    for num_jsonl, (path, file) in enumerate(dl_manager.iter_archive(archive_jsonl_path), start=1):
        _test_jsonl(path, file)
    assert num_jsonl == 2


@pytest.mark.parametrize("archive_nested_jsonl", ["tar_nested_jsonl_path", "zip_nested_jsonl_path"])
def test_iter_archive_file(archive_nested_jsonl, request):
    archive_nested_jsonl_path = request.getfixturevalue(archive_nested_jsonl)
    dl_manager = DownloadManager()
    for num_tar, (path, file) in enumerate(dl_manager.iter_archive(archive_nested_jsonl_path), start=1):
        for num_jsonl, (subpath, subfile) in enumerate(dl_manager.iter_archive(file), start=1):
            _test_jsonl(subpath, subfile)
    assert num_tar == 1
    assert num_jsonl == 2


def test_iter_files(data_dir_with_hidden_files):
    dl_manager = DownloadManager()
    for num_file, file in enumerate(dl_manager.iter_files(data_dir_with_hidden_files), start=1):
        assert os.path.basename(file) == ("test.txt" if num_file == 1 else "train.txt")
    assert num_file == 2
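# --- usage sketch (comment-only; URL and paths are hypothetical) ---
# from datasets.download.download_manager import DownloadManager
#
# dl_manager = DownloadManager(dataset_name="dummy")
# local_path = dl_manager.download("https://example.com/file1.txt")    # cached under a URL-hash name
# for path, file_obj in dl_manager.iter_archive(local_archive_path):   # stream tar/zip members
#     ...                                                              # without extracting to disk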
| 669 | 1 |
def snake_to_camel_case(input_str: str, use_pascal: bool = False) -> str:
    if not isinstance(input_str, str):
        msg = f"Expected string as input, found {type(input_str)}"
        raise ValueError(msg)
    if not isinstance(use_pascal, bool):
        msg = f"Expected boolean as use_pascal parameter, found {type(use_pascal)}"
        raise ValueError(msg)
    words = input_str.split("_")
    start_index = 0 if use_pascal else 1
    words_to_capitalize = words[start_index:]
    capitalized_words = [word[0].upper() + word[1:] for word in words_to_capitalize]
    initial_word = "" if use_pascal else words[0]
    return "".join([initial_word, *capitalized_words])
if __name__ == "__main__":
from doctest import testmod
testmod()
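# worked examples (hand-checked against the implementation above):
#   snake_to_camel_case("some_random_string")                  -> "someRandomString"
#   snake_to_camel_case("some_random_string", use_pascal=True) -> "SomeRandomString"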
| 669 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""roberta-base""": """https://huggingface.co/roberta-base/resolve/main/config.json""",
"""roberta-large""": """https://huggingface.co/roberta-large/resolve/main/config.json""",
"""roberta-large-mnli""": """https://huggingface.co/roberta-large-mnli/resolve/main/config.json""",
"""distilroberta-base""": """https://huggingface.co/distilroberta-base/resolve/main/config.json""",
"""roberta-base-openai-detector""": """https://huggingface.co/roberta-base-openai-detector/resolve/main/config.json""",
"""roberta-large-openai-detector""": """https://huggingface.co/roberta-large-openai-detector/resolve/main/config.json""",
}
class _lowerCAmelCase ( PretrainedConfig ):
    model_type = "roberta"

    def __init__(
        self,
        vocab_size=50265,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class _lowerCAmelCase ( OnnxConfig ):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
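# --- sketch of what the ONNX input spec above resolves to for the default task ---
# OrderedDict([("input_ids",      {0: "batch", 1: "sequence"}),
#              ("attention_mask", {0: "batch", 1: "sequence"})])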
| 669 | 1 |
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
import torch
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)


class _lowerCAmelCase ( SequenceFeatureExtractor ):
'''simple docstring'''
    model_input_names = ["input_features", "is_longer"]
def __init__( self : Tuple , UpperCamelCase : Tuple=64 , UpperCamelCase : int=4_80_00 , UpperCamelCase : List[Any]=4_80 , UpperCamelCase : List[Any]=10 , UpperCamelCase : Any=10_24 , UpperCamelCase : Optional[Any]=0.0 , UpperCamelCase : Optional[int]=False , UpperCamelCase : float = 0 , UpperCamelCase : float = 1_40_00 , UpperCamelCase : int = None , UpperCamelCase : str = "fusion" , UpperCamelCase : str = "repeatpad" , **UpperCamelCase : str , ):
'''simple docstring'''
super().__init__(
feature_size=UpperCamelCase , sampling_rate=UpperCamelCase , padding_value=UpperCamelCase , return_attention_mask=UpperCamelCase , **UpperCamelCase , )
_snake_case : Tuple = top_db
_snake_case : Dict = truncation
_snake_case : Any = padding
_snake_case : Tuple = fft_window_size
_snake_case : Optional[Any] = (fft_window_size >> 1) + 1
_snake_case : Optional[Any] = hop_length
_snake_case : str = max_length_s
_snake_case : Union[str, Any] = max_length_s * sampling_rate
_snake_case : str = sampling_rate
_snake_case : List[str] = frequency_min
_snake_case : List[str] = frequency_max
_snake_case : List[Any] = mel_filter_bank(
num_frequency_bins=self.nb_frequency_bins , num_mel_filters=UpperCamelCase , min_frequency=UpperCamelCase , max_frequency=UpperCamelCase , sampling_rate=UpperCamelCase , norm=UpperCamelCase , mel_scale='htk' , )
_snake_case : Tuple = mel_filter_bank(
num_frequency_bins=self.nb_frequency_bins , num_mel_filters=UpperCamelCase , min_frequency=UpperCamelCase , max_frequency=UpperCamelCase , sampling_rate=UpperCamelCase , norm='slaney' , mel_scale='slaney' , )
def UpperCamelCase_ ( self : Dict ):
'''simple docstring'''
_snake_case : Optional[int] = copy.deepcopy(self.__dict__ )
_snake_case : Union[str, Any] = self.__class__.__name__
if "mel_filters" in output:
del output["mel_filters"]
if "mel_filters_slaney" in output:
del output["mel_filters_slaney"]
return output
def UpperCamelCase_ ( self : List[Any] , UpperCamelCase : np.array , UpperCamelCase : Optional[np.array] = None ):
'''simple docstring'''
_snake_case : List[Any] = spectrogram(
UpperCamelCase , window_function(self.fft_window_size , 'hann' ) , frame_length=self.fft_window_size , hop_length=self.hop_length , power=2.0 , mel_filters=UpperCamelCase , log_mel='dB' , )
return log_mel_spectrogram.T
def UpperCamelCase_ ( self : Dict , UpperCamelCase : Tuple , UpperCamelCase : Tuple , UpperCamelCase : Tuple ):
'''simple docstring'''
_snake_case : Dict = np.array_split(list(range(0 , total_frames - chunk_frames + 1 ) ) , 3 )
if len(ranges[1] ) == 0:
# if the audio is too short, we just use the first chunk
_snake_case : Tuple = [0]
if len(ranges[2] ) == 0:
# if the audio is too short, we just use the first chunk
_snake_case : List[Any] = [0]
# randomly choose index for each part
_snake_case : Tuple = np.random.choice(ranges[0] )
_snake_case : Tuple = np.random.choice(ranges[1] )
_snake_case : Union[str, Any] = np.random.choice(ranges[2] )
_snake_case : Dict = mel[idx_front : idx_front + chunk_frames, :]
_snake_case : Dict = mel[idx_middle : idx_middle + chunk_frames, :]
_snake_case : Tuple = mel[idx_back : idx_back + chunk_frames, :]
_snake_case : Any = torch.tensor(mel[None, None, :] )
_snake_case : List[str] = torch.nn.functional.interpolate(
UpperCamelCase , size=[chunk_frames, 64] , mode='bilinear' , align_corners=UpperCamelCase )
_snake_case : Optional[Any] = mel_shrink[0][0].numpy()
_snake_case : Any = np.stack([mel_shrink, mel_chunk_front, mel_chunk_middle, mel_chunk_back] , axis=0 )
return mel_fusion
def UpperCamelCase_ ( self : str , UpperCamelCase : np.array , UpperCamelCase : Any , UpperCamelCase : str , UpperCamelCase : str ):
'''simple docstring'''
if waveform.shape[0] > max_length:
if truncation == "rand_trunc":
_snake_case : Optional[Any] = True
# random crop to max_length (for compatibility) -> this should be handled by self.pad
_snake_case : Optional[Any] = len(UpperCamelCase ) - max_length
_snake_case : Tuple = np.random.randint(0 , overflow + 1 )
_snake_case : Any = waveform[idx : idx + max_length]
_snake_case : Union[str, Any] = self._np_extract_fbank_features(UpperCamelCase , self.mel_filters_slaney )[None, :]
elif truncation == "fusion":
_snake_case : int = self._np_extract_fbank_features(UpperCamelCase , self.mel_filters )
_snake_case : Dict = max_length // self.hop_length + 1 # the +1 related to how the spectrogram is computed
_snake_case : Optional[int] = mel.shape[0]
if chunk_frames == total_frames:
# there is a corner case where the audio length is larger than max_length but smaller than max_length+hop_length.
# In this case, we just use the whole audio.
_snake_case : List[str] = np.stack([mel, mel, mel, mel] , axis=0 )
_snake_case : int = False
else:
_snake_case : Optional[Any] = self._random_mel_fusion(UpperCamelCase , UpperCamelCase , UpperCamelCase )
_snake_case : Dict = True
else:
raise NotImplementedError(f"""data_truncating {truncation} not implemented""" )
else:
_snake_case : int = False
# only use repeat as a new possible value for padding. you repeat the audio before applying the usual max_length padding
if waveform.shape[0] < max_length:
if padding == "repeat":
_snake_case : Optional[Any] = int(max_length / len(UpperCamelCase ) )
_snake_case : Union[str, Any] = np.stack(np.tile(UpperCamelCase , n_repeat + 1 ) )[:max_length]
if padding == "repeatpad":
_snake_case : Optional[int] = int(max_length / len(UpperCamelCase ) )
_snake_case : Union[str, Any] = np.stack(np.tile(UpperCamelCase , UpperCamelCase ) )
_snake_case : List[Any] = np.pad(UpperCamelCase , (0, max_length - waveform.shape[0]) , mode='constant' , constant_values=0 )
if truncation == "fusion":
_snake_case : List[Any] = self._np_extract_fbank_features(UpperCamelCase , self.mel_filters )
_snake_case : List[Any] = np.stack([input_mel, input_mel, input_mel, input_mel] , axis=0 )
else:
_snake_case : Optional[int] = self._np_extract_fbank_features(UpperCamelCase , self.mel_filters_slaney )[None, :]
return input_mel, longer
def __call__( self : List[str] , UpperCamelCase : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , UpperCamelCase : str = None , UpperCamelCase : Optional[str] = None , UpperCamelCase : Optional[int] = None , UpperCamelCase : Optional[int] = None , UpperCamelCase : Optional[Union[str, TensorType]] = None , **UpperCamelCase : Tuple , ):
'''simple docstring'''
_snake_case : Tuple = truncation if truncation is not None else self.truncation
_snake_case : Optional[int] = padding if padding else self.padding
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
f"""The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a"""
f""" sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input"""
f""" was sampled with {self.sampling_rate} and not {sampling_rate}.""" )
else:
logger.warning(
'It is strongly recommended to pass the `sampling_rate` argument to this function. '
'Failing to do so can result in silent errors that might be hard to debug.' )
_snake_case : int = isinstance(UpperCamelCase , np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(f"""Only mono-channel audio is supported for input to {self}""" )
_snake_case : Union[str, Any] = is_batched_numpy or (
isinstance(UpperCamelCase , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
)
if is_batched:
_snake_case : Tuple = [np.asarray(UpperCamelCase , dtype=np.floataa ) for speech in raw_speech]
elif not is_batched and not isinstance(UpperCamelCase , np.ndarray ):
_snake_case : Optional[Any] = np.asarray(UpperCamelCase , dtype=np.floataa )
elif isinstance(UpperCamelCase , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
_snake_case : Optional[Any] = raw_speech.astype(np.floataa )
# always return batch
if not is_batched:
_snake_case : List[Any] = [np.asarray(UpperCamelCase )]
# convert to mel spectrogram, truncate and pad if needed.
_snake_case : Any = [
self._get_input_mel(UpperCamelCase , max_length if max_length else self.nb_max_samples , UpperCamelCase , UpperCamelCase )
for waveform in raw_speech
]
_snake_case : Dict = []
_snake_case : Dict = []
for mel, longer in padded_inputs:
input_mel.append(UpperCamelCase )
is_longer.append(UpperCamelCase )
if truncation == "fusion" and sum(UpperCamelCase ) == 0:
# if no audio is longer than 10s, then randomly select one audio to be longer
_snake_case : str = np.random.randint(0 , len(UpperCamelCase ) )
_snake_case : List[Any] = True
if isinstance(input_mel[0] , UpperCamelCase ):
_snake_case : Union[str, Any] = [np.asarray(UpperCamelCase , dtype=np.floataa ) for feature in input_mel]
# is_longer is a list of bool
_snake_case : int = [[longer] for longer in is_longer]
_snake_case : int = {'input_features': input_mel, 'is_longer': is_longer}
_snake_case : Optional[Any] = BatchFeature(UpperCamelCase )
if return_tensors is not None:
_snake_case : Any = input_features.convert_to_tensors(UpperCamelCase )
return input_features
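# --- usage sketch (comment-only; a minimal sketch assuming the class above, still named
# `_lowerCAmelCase` in this dump, mirrors transformers' ClapFeatureExtractor) ---
# import numpy as np
#
# fe = _lowerCAmelCase()                   # defaults: 48 kHz, 64 mel bins, "fusion" truncation
# audio = np.random.randn(12 * 48_000)     # 12 s of mono audio: longer than max_length_s = 10
# feats = fe(audio, sampling_rate=48_000, return_tensors="np")
# feats["input_features"]                  # fused mel stack: 4 views (3 random crops + shrunk whole)
# feats["is_longer"]                       # [[True]] here, since the fusion path was taken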
| 669 |
from random import randint, random
def construct_highway(
    number_of_cells: int,
    frequency: int,
    initial_speed: int,
    random_frequency: bool = False,
    random_speed: bool = False,
    max_speed: int = 5,
) -> list:
    highway = [[-1] * number_of_cells]  # Create a highway without any car
    i = 0
    initial_speed = max(initial_speed, 0)
    while i < number_of_cells:
        highway[0][i] = (
            randint(0, max_speed) if random_speed else initial_speed
        )  # Place the cars
        i += (
            randint(1, max_speed * 2) if random_frequency else frequency
        )  # Arbitrary number, may need tuning
    return highway
def get_distance(highway_now: list, car_index: int) -> int:
    distance = 0
    cells = highway_now[car_index + 1 :]
    for cell in range(len(cells)):  # May need a better name for this
        if cells[cell] != -1:  # If the cell is not empty then
            return distance  # we have the distance we wanted
        distance += 1
    # Here if the car is near the end of the highway
    return distance + get_distance(highway_now, -1)
def update(highway_now: list, probability: float, max_speed: int) -> list:
    number_of_cells = len(highway_now)
    # Before calculations, the highway is empty
    next_highway = [-1] * number_of_cells
    for car_index in range(number_of_cells):
        if highway_now[car_index] != -1:
            # Add 1 to the current speed of the car and cap the speed
            next_highway[car_index] = min(highway_now[car_index] + 1, max_speed)
            # Number of empty cells before the next car
            dn = get_distance(highway_now, car_index) - 1
            # We can't have the car causing an accident
            next_highway[car_index] = min(next_highway[car_index], dn)
            if random() < probability:
                # Randomly, a driver will slow down
                next_highway[car_index] = max(next_highway[car_index] - 1, 0)
    return next_highway
def simulate(highway: list, number_of_update: int, probability: float, max_speed: int) -> list:
    number_of_cells = len(highway[0])
    for i in range(number_of_update):
        next_speeds_calculated = update(highway[i], probability, max_speed)
        real_next_speeds = [-1] * number_of_cells
        for car_index in range(number_of_cells):
            speed = next_speeds_calculated[car_index]
            if speed != -1:
                # Change the position based on the speed (with % to create the loop)
                index = (car_index + speed) % number_of_cells
                # Commit the change of position
                real_next_speeds[index] = speed
        highway.append(real_next_speeds)
    return highway
if __name__ == "__main__":
import doctest
doctest.testmod()
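# --- usage sketch (hand-picked parameters; output is random by design) ---
# highway = construct_highway(number_of_cells=21, frequency=4, initial_speed=0)
# rows = simulate(highway, number_of_update=5, probability=0.3, max_speed=5)
# len(rows)  -> 6: the initial state plus one row of speeds/positions per update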
| 669 | 1 |
def actual_power(a: int, b: int):
    if b == 0:
        return 1
    if (b % 2) == 0:
        return actual_power(a, int(b / 2)) * actual_power(a, int(b / 2))
    else:
        return a * actual_power(a, int(b / 2)) * actual_power(a, int(b / 2))


def power(a: int, b: int) -> float:
    if b < 0:
        return 1 / actual_power(a, b)
    return actual_power(a, b)
if __name__ == "__main__":
print(power(-2, -3))
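# worked trace: power(-2, -3) == 1 / actual_power(-2, -3)
#   actual_power(-2, -3): -3 is odd -> -2 * actual_power(-2, -1) ** 2
#   actual_power(-2, -1): -1 is odd -> -2 * actual_power(-2, 0) ** 2 == -2
#   (int(b / 2) truncates toward zero, so the recursion terminates at b == 0)
#   => actual_power(-2, -3) == -2 * (-2) ** 2 == -8, and power(-2, -3) == -0.125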
| 669 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_realm import RealmTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""google/realm-cc-news-pretrained-embedder""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/vocab.txt"""
),
"""google/realm-cc-news-pretrained-encoder""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/vocab.txt"""
),
"""google/realm-cc-news-pretrained-scorer""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/vocab.txt"""
),
"""google/realm-cc-news-pretrained-openqa""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-openqa/aresolve/main/vocab.txt"""
),
"""google/realm-orqa-nq-openqa""": """https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/vocab.txt""",
"""google/realm-orqa-nq-reader""": """https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/vocab.txt""",
"""google/realm-orqa-wq-openqa""": """https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/vocab.txt""",
"""google/realm-orqa-wq-reader""": """https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/vocab.txt""",
},
"""tokenizer_file""": {
"""google/realm-cc-news-pretrained-embedder""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/tokenizer.jsont"""
),
"""google/realm-cc-news-pretrained-encoder""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/tokenizer.json"""
),
"""google/realm-cc-news-pretrained-scorer""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/tokenizer.json"""
),
"""google/realm-cc-news-pretrained-openqa""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-openqa/aresolve/main/tokenizer.json"""
),
"""google/realm-orqa-nq-openqa""": (
"""https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/tokenizer.json"""
),
"""google/realm-orqa-nq-reader""": (
"""https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/tokenizer.json"""
),
"""google/realm-orqa-wq-openqa""": (
"""https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/tokenizer.json"""
),
"""google/realm-orqa-wq-reader""": (
"""https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/tokenizer.json"""
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""google/realm-cc-news-pretrained-embedder""": 512,
"""google/realm-cc-news-pretrained-encoder""": 512,
"""google/realm-cc-news-pretrained-scorer""": 512,
"""google/realm-cc-news-pretrained-openqa""": 512,
"""google/realm-orqa-nq-openqa""": 512,
"""google/realm-orqa-nq-reader""": 512,
"""google/realm-orqa-wq-openqa""": 512,
"""google/realm-orqa-wq-reader""": 512,
}
PRETRAINED_INIT_CONFIGURATION = {
"""google/realm-cc-news-pretrained-embedder""": {"""do_lower_case""": True},
"""google/realm-cc-news-pretrained-encoder""": {"""do_lower_case""": True},
"""google/realm-cc-news-pretrained-scorer""": {"""do_lower_case""": True},
"""google/realm-cc-news-pretrained-openqa""": {"""do_lower_case""": True},
"""google/realm-orqa-nq-openqa""": {"""do_lower_case""": True},
"""google/realm-orqa-nq-reader""": {"""do_lower_case""": True},
"""google/realm-orqa-wq-openqa""": {"""do_lower_case""": True},
"""google/realm-orqa-wq-reader""": {"""do_lower_case""": True},
}
class _lowerCAmelCase ( PreTrainedTokenizerFast ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = RealmTokenizer
def __init__( self : List[str] , UpperCamelCase : Optional[int]=None , UpperCamelCase : List[Any]=None , UpperCamelCase : Optional[Any]=True , UpperCamelCase : Optional[Any]="[UNK]" , UpperCamelCase : Any="[SEP]" , UpperCamelCase : Optional[Any]="[PAD]" , UpperCamelCase : Optional[int]="[CLS]" , UpperCamelCase : Optional[Any]="[MASK]" , UpperCamelCase : Dict=True , UpperCamelCase : Optional[int]=None , **UpperCamelCase : Union[str, Any] , ):
'''simple docstring'''
super().__init__(
UpperCamelCase , tokenizer_file=UpperCamelCase , do_lower_case=UpperCamelCase , unk_token=UpperCamelCase , sep_token=UpperCamelCase , pad_token=UpperCamelCase , cls_token=UpperCamelCase , mask_token=UpperCamelCase , tokenize_chinese_chars=UpperCamelCase , strip_accents=UpperCamelCase , **UpperCamelCase , )
_snake_case : int = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get('lowercase' , UpperCamelCase ) != do_lower_case
or normalizer_state.get('strip_accents' , UpperCamelCase ) != strip_accents
or normalizer_state.get('handle_chinese_chars' , UpperCamelCase ) != tokenize_chinese_chars
):
_snake_case : int = getattr(UpperCamelCase , normalizer_state.pop('type' ) )
_snake_case : List[str] = do_lower_case
_snake_case : List[Any] = strip_accents
_snake_case : Dict = tokenize_chinese_chars
_snake_case : Any = normalizer_class(**UpperCamelCase )
_snake_case : Optional[int] = do_lower_case
def UpperCamelCase_ ( self : Dict , UpperCamelCase : Any , **UpperCamelCase : Union[str, Any] ):
'''simple docstring'''
_snake_case : Tuple = PaddingStrategy.MAX_LENGTH
_snake_case : Any = text
_snake_case : List[str] = kwargs.pop('text_pair' , UpperCamelCase )
_snake_case : int = kwargs.pop('return_tensors' , UpperCamelCase )
_snake_case : Optional[int] = {
'input_ids': [],
'attention_mask': [],
'token_type_ids': [],
}
for idx, candidate_text in enumerate(UpperCamelCase ):
if batch_text_pair is not None:
_snake_case : List[Any] = batch_text_pair[idx]
else:
_snake_case : Optional[Any] = None
_snake_case : Optional[int] = super().__call__(UpperCamelCase , UpperCamelCase , return_tensors=UpperCamelCase , **UpperCamelCase )
_snake_case : str = encoded_candidates.get('input_ids' )
_snake_case : Tuple = encoded_candidates.get('attention_mask' )
_snake_case : List[str] = encoded_candidates.get('token_type_ids' )
if encoded_input_ids is not None:
output_data["input_ids"].append(UpperCamelCase )
if encoded_attention_mask is not None:
output_data["attention_mask"].append(UpperCamelCase )
if encoded_token_type_ids is not None:
output_data["token_type_ids"].append(UpperCamelCase )
_snake_case : str = {key: item for key, item in output_data.items() if len(UpperCamelCase ) != 0}
return BatchEncoding(UpperCamelCase , tensor_type=UpperCamelCase )
def UpperCamelCase_ ( self : str , UpperCamelCase : Optional[Any] , UpperCamelCase : Union[str, Any]=None ):
'''simple docstring'''
_snake_case : Dict = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def UpperCamelCase_ ( self : Union[str, Any] , UpperCamelCase : List[int] , UpperCamelCase : Optional[List[int]] = None ):
'''simple docstring'''
_snake_case : int = [self.sep_token_id]
_snake_case : Dict = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def UpperCamelCase_ ( self : Union[str, Any] , UpperCamelCase : str , UpperCamelCase : Optional[str] = None ):
'''simple docstring'''
_snake_case : Optional[Any] = self._tokenizer.model.save(UpperCamelCase , name=UpperCamelCase )
return tuple(UpperCamelCase )
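# --- usage sketch (comment-only; the candidate-batching entry point is obfuscated
# above as `UpperCamelCase_`, upstream it is RealmTokenizerFast.batch_encode_candidates) ---
# tokenizer = RealmTokenizerFast.from_pretrained("google/realm-cc-news-pretrained-encoder")
# text = [["Hello world!", "Nice to meet you!"], ["The cute cat.", "The adorable dog."]]
# batch = tokenizer.batch_encode_candidates(text, max_length=10, return_tensors="pt")
# batch.input_ids.shape  -> (num_questions, num_candidates, max_length)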
| 669 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""google/pegasus-large""": """https://huggingface.co/google/pegasus-large/resolve/main/config.json""",
# See all PEGASUS models at https://huggingface.co/models?filter=pegasus
}
class _lowerCAmelCase ( PretrainedConfig ):
    model_type = "pegasus"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=50265,
        max_position_embeddings=1024,
        encoder_layers=12,
        encoder_ffn_dim=4096,
        encoder_attention_heads=16,
        decoder_layers=12,
        decoder_ffn_dim=4096,
        decoder_attention_heads=16,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        use_cache=True,
        is_encoder_decoder=True,
        activation_function="gelu",
        d_model=1024,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        decoder_start_token_id=0,
        scale_embedding=False,
        pad_token_id=0,
        eos_token_id=1,
        forced_eos_token_id=1,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        super().__init__(
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id,
            **kwargs,
        )

    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model
| 669 |
import argparse
import os
import transformers
from .convert_slow_tokenizer import SLOW_TO_FAST_CONVERTERS
from .utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

TOKENIZER_CLASSES = {name: getattr(transformers, name + "Fast") for name in SLOW_TO_FAST_CONVERTERS}
def convert_slow_checkpoint_to_fast(tokenizer_name, checkpoint_name, dump_path, force_download):
    if tokenizer_name is not None and tokenizer_name not in TOKENIZER_CLASSES:
        raise ValueError(f"""Unrecognized tokenizer name, should be one of {list(TOKENIZER_CLASSES.keys())}.""")
    if tokenizer_name is None:
        tokenizer_names = TOKENIZER_CLASSES
    else:
        tokenizer_names = {tokenizer_name: getattr(transformers, tokenizer_name + "Fast")}
    logger.info(f"""Loading tokenizer classes: {tokenizer_names}""")
    for tokenizer_name in tokenizer_names:
        tokenizer_class = TOKENIZER_CLASSES[tokenizer_name]
        add_prefix = True
        if checkpoint_name is None:
            checkpoint_names = list(tokenizer_class.max_model_input_sizes.keys())
        else:
            checkpoint_names = [checkpoint_name]
        logger.info(f"""For tokenizer {tokenizer_class.__class__.__name__} loading checkpoints: {checkpoint_names}""")
        for checkpoint in checkpoint_names:
            logger.info(f"""Loading {tokenizer_class.__class__.__name__} {checkpoint}""")
            # Load tokenizer
            tokenizer = tokenizer_class.from_pretrained(checkpoint, force_download=force_download)
            # Save fast tokenizer
            logger.info(f"""Save fast tokenizer to {dump_path} with prefix {checkpoint} add_prefix {add_prefix}""")
            # For organization names we create sub-directories
            if "/" in checkpoint:
                checkpoint_directory, checkpoint_prefix_name = checkpoint.split("/")
                dump_path_full = os.path.join(dump_path, checkpoint_directory)
            elif add_prefix:
                checkpoint_prefix_name = checkpoint
                dump_path_full = dump_path
            else:
                checkpoint_prefix_name = None
                dump_path_full = dump_path
            logger.info(f"""=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}""")
            if checkpoint in list(tokenizer.pretrained_vocab_files_map.values())[0]:
                file_path = list(tokenizer.pretrained_vocab_files_map.values())[0][checkpoint]
                next_char = file_path.split(checkpoint)[-1][0]
                if next_char == "/":
                    dump_path_full = os.path.join(dump_path_full, checkpoint_prefix_name)
                    checkpoint_prefix_name = None
                logger.info(f"""=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}""")
            file_names = tokenizer.save_pretrained(
                dump_path_full, legacy_format=False, filename_prefix=checkpoint_prefix_name
            )
            logger.info(f"""=> File names {file_names}""")
            for file_name in file_names:
                if not file_name.endswith("tokenizer.json"):
                    os.remove(file_name)
                    logger.info(f"""=> removing {file_name}""")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--dump_path""", default=None, type=str, required=True, help="""Path to output generated fast tokenizer files."""
)
parser.add_argument(
"""--tokenizer_name""",
default=None,
type=str,
help=(
F"""Optional tokenizer type selected in the list of {list(TOKENIZER_CLASSES.keys())}. If not given, will """
"""download and convert all the checkpoints from AWS."""
),
)
parser.add_argument(
"""--checkpoint_name""",
default=None,
type=str,
help="""Optional checkpoint name. If not given, will download and convert the canonical checkpoints from AWS.""",
)
parser.add_argument(
"""--force_download""",
action="""store_true""",
help="""Re-download checkpoints.""",
)
    args = parser.parse_args()
convert_slow_checkpoint_to_fast(args.tokenizer_name, args.checkpoint_name, args.dump_path, args.force_download)
| 669 | 1 |
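# Example invocation of the conversion script above (hypothetical output path;
# tokenizer/checkpoint names shown for illustration):
#   python convert_slow_tokenizers_checkpoints_to_fast.py \
#       --tokenizer_name BertTokenizer --checkpoint_name bert-base-uncased \
#       --dump_path ./fast_tokenizers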
import warnings
from functools import wraps
from typing import Callable
def experimental( fn: Callable )-> Callable:
    @wraps(fn )
    def _inner_fn(*args , **kwargs ):
        warnings.warn(
            (F"""'{fn.__name__}' is experimental and might be subject to breaking changes in the future.""") , UserWarning , )
        return fn(*args , **kwargs )
    return _inner_fn
| 669 |
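# Minimal usage sketch of the decorator above (illustrative function only):
@experimental
def beta_feature(x):
    return x * 2
beta_feature(3)  # emits a UserWarning, then returns 6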
def base16_encode( data: bytes )-> str:
    return "".join([hex(byte )[2:].zfill(2 ).upper() for byte in list(data )] )
def base16_decode( data: str )-> bytes:
    # Check data validity, following RFC3548
    # https://www.ietf.org/rfc/rfc3548.txt
    if (len(data ) % 2) != 0:
        raise ValueError(
            'Base16 encoded data is invalid:\nData does not have an even number of hex digits.' )
    # Check the character set - the standard base16 alphabet
    # is uppercase according to RFC3548 section 6
    if not set(data ) <= set('0123456789ABCDEF' ):
        raise ValueError(
            'Base16 encoded data is invalid:\nData is not uppercase hex or it contains invalid characters.' )
    # For every two hexadecimal digits (= a byte), turn it into an integer.
    # Then, string the result together into bytes, and return it.
    return bytes(int(data[i] + data[i + 1] , 16 ) for i in range(0 , len(data ) , 2 ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 669 | 1 |
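# Cross-check of the Base16 round-trip above against the standard library
# (a quick sanity sketch, not part of the original snippet):
import base64
assert base16_encode(b"Hello") == "48656C6C6F"
assert base64.b16encode(b"Hello") == b"48656C6C6F"
assert base16_decode("48656C6C6F") == b"Hello" == base64.b16decode("48656C6C6F")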
from ..utils import DummyObject, requires_backends
class _lowerCAmelCase ( metaclass=DummyObject ):
    '''simple docstring'''
    _backends = ["""speech"""]
    def __init__( self , *args , **kwargs ):
        '''simple docstring'''
        requires_backends(self , ['speech'] )
class _lowerCAmelCase ( metaclass=DummyObject ):
    '''simple docstring'''
    _backends = ["""speech"""]
    def __init__( self , *args , **kwargs ):
        '''simple docstring'''
        requires_backends(self , ['speech'] )
| 669 |
import csv
import tweepy
# Twitter API credentials
consumer_key = """"""
consumer_secret = """"""
access_key = """"""
access_secret = """"""
def get_all_tweets( screen_name: str )-> None:
    # authorize twitter, initialize tweepy
    auth = tweepy.OAuthHandler(consumer_key , consumer_secret )
    auth.set_access_token(access_key , access_secret )
    api = tweepy.API(auth )
    # initialize a list to hold all the tweepy Tweets
    alltweets = []
    # make initial request for most recent tweets (200 is the maximum allowed count)
    new_tweets = api.user_timeline(screen_name=screen_name , count=2_00 )
    # save most recent tweets
    alltweets.extend(new_tweets )
    # save the id of the oldest tweet less one
    oldest = alltweets[-1].id - 1
    # keep grabbing tweets until there are no tweets left to grab
    while len(new_tweets ) > 0:
        print(F"""getting tweets before {oldest}""" )
        # all subsequent requests use the max_id param to prevent duplicates
        new_tweets = api.user_timeline(
            screen_name=screen_name , count=2_00 , max_id=oldest )
        # save most recent tweets
        alltweets.extend(new_tweets )
        # update the id of the oldest tweet less one
        oldest = alltweets[-1].id - 1
        print(F"""...{len(alltweets )} tweets downloaded so far""" )
    # transform the tweepy tweets into a 2D array that will populate the csv
    outtweets = [[tweet.id_str, tweet.created_at, tweet.text] for tweet in alltweets]
    # write the csv
    with open(F"""new_{screen_name}_tweets.csv""" , 'w' ) as f:
        writer = csv.writer(f )
        writer.writerow(['id', 'created_at', 'text'] )
        writer.writerows(outtweets )
if __name__ == "__main__":
# pass in the username of the account you want to download
get_all_tweets("""FirePing32""")
| 669 | 1 |
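# Note: tweepy also ships a Cursor helper that performs the same max_id
# pagination internally; a sketch, assuming the authenticated `api` above:
#   for tweet in tweepy.Cursor(api.user_timeline, screen_name="FirePing32", count=200).items():
#       ...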
from ...utils import is_torch_available, is_transformers_available
if is_transformers_available() and is_torch_available():
from .pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings, VQDiffusionPipeline
| 669 |
import copy
from dataclasses import dataclass
from pathlib import Path
from typing import Dict, Optional, Union
@dataclass
class _lowerCAmelCase :
'''simple docstring'''
a_ : Optional[Union[str, Path]] =None
a_ : bool =False
a_ : bool =False
a_ : bool =False
a_ : Optional[Dict] =None
a_ : Optional[str] =None
a_ : bool =False
a_ : bool =False
a_ : bool =False
a_ : bool =True
a_ : Optional[int] =None
a_ : int =1
a_ : Optional[Union[str, bool]] =None
a_ : bool =False
a_ : Optional[Dict] =None
a_ : Optional[str] =None
def UpperCamelCase_ ( self : Dict ):
'''simple docstring'''
        return self.__class__(**{k: copy.deepcopy(v ) for k, v in self.__dict__.items()} )
| 669 | 1 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCAmelCase_ = logging.get_logger(__name__)
lowerCAmelCase_ = {
"""roberta-base""": """https://huggingface.co/roberta-base/resolve/main/config.json""",
"""roberta-large""": """https://huggingface.co/roberta-large/resolve/main/config.json""",
"""roberta-large-mnli""": """https://huggingface.co/roberta-large-mnli/resolve/main/config.json""",
"""distilroberta-base""": """https://huggingface.co/distilroberta-base/resolve/main/config.json""",
"""roberta-base-openai-detector""": """https://huggingface.co/roberta-base-openai-detector/resolve/main/config.json""",
"""roberta-large-openai-detector""": """https://huggingface.co/roberta-large-openai-detector/resolve/main/config.json""",
}
class RobertaConfig ( PretrainedConfig ):
    '''simple docstring'''
    model_type = """roberta"""
    def __init__( self , vocab_size=5_02_65 , hidden_size=7_68 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=30_72 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=5_12 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1e-12 , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , position_embedding_type="absolute" , use_cache=True , classifier_dropout=None , **kwargs , ):
        '''simple docstring'''
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class RobertaOnnxConfig ( OnnxConfig ):
    '''simple docstring'''
    @property
    def inputs( self ):
        '''simple docstring'''
        if self.task == "multiple-choice":
            dynamic_axis = {0: 'batch', 1: 'choice', 2: 'sequence'}
        else:
            dynamic_axis = {0: 'batch', 1: 'sequence'}
        return OrderedDict(
            [
                ('input_ids', dynamic_axis),
                ('attention_mask', dynamic_axis),
            ] )
| 669 |
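# Minimal sketch (assumes a working transformers install): the defaults above
# mirror the roberta-base architecture.
from transformers import RobertaConfig

config = RobertaConfig()
assert config.hidden_size == 768 and config.num_hidden_layers == 12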
import unittest
from pathlib import Path
from tempfile import TemporaryDirectory
from transformers import AutoConfig, TFGPTaLMHeadModel, is_keras_nlp_available, is_tf_available
from transformers.models.gpta.tokenization_gpta import GPTaTokenizer
from transformers.testing_utils import require_keras_nlp, require_tf, slow
if is_tf_available():
import tensorflow as tf
if is_keras_nlp_available():
from transformers.models.gpta import TFGPTaTokenizer
lowerCAmelCase_ = ["""gpt2"""]
lowerCAmelCase_ = """gpt2"""
if is_tf_available():
class _lowerCAmelCase ( tf.Module ):
'''simple docstring'''
def __init__( self : List[str] , UpperCamelCase : Dict ):
'''simple docstring'''
super().__init__()
_snake_case : Optional[int] = tokenizer
_snake_case : Union[str, Any] = AutoConfig.from_pretrained(UpperCamelCase )
_snake_case : int = TFGPTaLMHeadModel.from_config(UpperCamelCase )
@tf.function(input_signature=(tf.TensorSpec((None,) , tf.string , name='text' ),) )
def UpperCamelCase_ ( self : Optional[int] , UpperCamelCase : Optional[int] ):
'''simple docstring'''
_snake_case : Dict = self.tokenizer(UpperCamelCase )
_snake_case : Union[str, Any] = tokenized['input_ids'].to_tensor()
_snake_case : Any = tf.cast(input_ids_dense > 0 , tf.intaa )
# input_mask = tf.reshape(input_mask, [-1, MAX_SEQ_LEN])
_snake_case : Tuple = self.model(input_ids=UpperCamelCase , attention_mask=UpperCamelCase )['logits']
return outputs
@require_tf
@require_keras_nlp
class _lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def UpperCamelCase_ ( self : Any ):
'''simple docstring'''
super().setUp()
_snake_case : Optional[int] = [GPTaTokenizer.from_pretrained(UpperCamelCase ) for checkpoint in (TOKENIZER_CHECKPOINTS)]
_snake_case : Tuple = [TFGPTaTokenizer.from_pretrained(UpperCamelCase ) for checkpoint in TOKENIZER_CHECKPOINTS]
assert len(self.tokenizers ) == len(self.tf_tokenizers )
_snake_case : Any = [
'This is a straightforward English test sentence.',
'This one has some weird characters\rto\nsee\r\nif those\u00E9break things.',
'Now we\'re going to add some Chinese: 一 二 三 一二三',
'And some much more rare Chinese: 齉 堃 齉堃',
'Je vais aussi écrire en français pour tester les accents',
'Classical Irish also has some unusual characters, so in they go: Gaelaċ, ꝼ',
]
_snake_case : Tuple = list(zip(self.test_sentences , self.test_sentences[::-1] ) )
def UpperCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
for tokenizer, tf_tokenizer in zip(self.tokenizers , self.tf_tokenizers ):
for test_inputs in self.test_sentences:
_snake_case : Optional[int] = tokenizer([test_inputs] , return_tensors='tf' )
_snake_case : Tuple = tf_tokenizer([test_inputs] )
for key in python_outputs.keys():
# convert them to numpy to avoid messing with ragged tensors
_snake_case : Dict = python_outputs[key].numpy()
_snake_case : Optional[Any] = tf_outputs[key].numpy()
self.assertTrue(tf.reduce_all(python_outputs_values.shape == tf_outputs_values.shape ) )
self.assertTrue(tf.reduce_all(tf.cast(UpperCamelCase , tf.intaa ) == tf_outputs_values ) )
@slow
def UpperCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
for tf_tokenizer in self.tf_tokenizers:
_snake_case : str = tf.function(UpperCamelCase )
for test_inputs in self.test_sentences:
_snake_case : int = tf.constant(UpperCamelCase )
_snake_case : Tuple = compiled_tokenizer(UpperCamelCase )
_snake_case : int = tf_tokenizer(UpperCamelCase )
for key in eager_outputs.keys():
self.assertTrue(tf.reduce_all(eager_outputs[key] == compiled_outputs[key] ) )
@slow
def UpperCamelCase_ ( self : Tuple ):
'''simple docstring'''
for tf_tokenizer in self.tf_tokenizers:
_snake_case : Union[str, Any] = ModelToSave(tokenizer=UpperCamelCase )
_snake_case : Optional[Any] = tf.convert_to_tensor([self.test_sentences[0]] )
_snake_case : Tuple = model.serving(UpperCamelCase ) # Build model with some sample inputs
with TemporaryDirectory() as tempdir:
_snake_case : str = Path(UpperCamelCase ) / 'saved.model'
tf.saved_model.save(UpperCamelCase , UpperCamelCase , signatures={'serving_default': model.serving} )
_snake_case : Optional[int] = tf.saved_model.load(UpperCamelCase )
_snake_case : List[str] = loaded_model.signatures['serving_default'](UpperCamelCase )['output_0']
# We may see small differences because the loaded model is compiled, so we need an epsilon for the test
self.assertTrue(tf.reduce_all(out == loaded_output ) )
@slow
def UpperCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
for tf_tokenizer in self.tf_tokenizers:
_snake_case : Optional[Any] = tf.convert_to_tensor([self.test_sentences[0]] )
_snake_case : Any = tf_tokenizer(UpperCamelCase ) # Build model with some sample inputs
_snake_case : Optional[Any] = tf_tokenizer.get_config()
_snake_case : Tuple = TFGPTaTokenizer.from_config(UpperCamelCase )
_snake_case : Optional[Any] = model_from_config(UpperCamelCase )
for key in from_config_output.keys():
self.assertTrue(tf.reduce_all(from_config_output[key] == out[key] ) )
@slow
def UpperCamelCase_ ( self : Any ):
'''simple docstring'''
for tf_tokenizer in self.tf_tokenizers:
# for the test to run
_snake_case : Union[str, Any] = 12_31_23
for max_length in [3, 5, 10_24]:
_snake_case : Union[str, Any] = tf.convert_to_tensor([self.test_sentences[0]] )
_snake_case : List[str] = tf_tokenizer(UpperCamelCase , max_length=UpperCamelCase )
_snake_case : int = out['input_ids'].numpy().shape[1]
assert out_length == max_length
| 669 | 1 |
def count_divisors( n: int )-> int:
    n_divisors = 1
    i = 2
    while i * i <= n:
        multiplicity = 0
        while n % i == 0:
            n //= i
            multiplicity += 1
        n_divisors *= multiplicity + 1
        i += 1
    if n > 1:
        n_divisors *= 2
    return n_divisors
def solution( )-> int:
    t_num = 1
    i = 1
    while True:
        i += 1
        t_num += i
        if count_divisors(t_num ) > 5_00:
            break
    return t_num
if __name__ == "__main__":
print(solution())
| 669 |
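# Worked check of the divisor-count formula behind count_divisors above:
# tau(n) = prod(a_i + 1) for n = prod(p_i ** a_i), so 28 = 2**2 * 7 has
# (2 + 1) * (1 + 1) = 6 divisors.
assert count_divisors(28) == 6  # 1, 2, 4, 7, 14, 28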
def hamming( n_element: int )-> list:
    n_element = int(n_element )
    if n_element < 1:
        my_error = ValueError('a should be a positive number' )
        raise my_error
    hamming_list = [1]
    i , j , k = (0, 0, 0)
    index = 1
    while index < n_element:
        while hamming_list[i] * 2 <= hamming_list[-1]:
            i += 1
        while hamming_list[j] * 3 <= hamming_list[-1]:
            j += 1
        while hamming_list[k] * 5 <= hamming_list[-1]:
            k += 1
        hamming_list.append(
            min(hamming_list[i] * 2 , hamming_list[j] * 3 , hamming_list[k] * 5 ) )
        index += 1
    return hamming_list
if __name__ == "__main__":
    n = input("""Enter the last number (nth term) of the Hamming Number Series: """)
    print("""Formula of Hamming Number Series => 2^i * 3^j * 5^k""")
    hamming_numbers = hamming(int(n))
print("""-----------------------------------------------------""")
print(F"""The list with nth numbers is: {hamming_numbers}""")
print("""-----------------------------------------------------""")
| 669 | 1 |
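# Quick sanity check: the first ten Hamming (5-smooth) numbers.
assert hamming(10) == [1, 2, 3, 4, 5, 6, 8, 9, 10, 12]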
def power( base: int , exponent: int )-> float:
    return base * power(base , (exponent - 1) ) if exponent else 1
if __name__ == "__main__":
    print("""Raise base to the power of exponent using recursion...""")
    base = int(input("""Enter the base: """).strip())
    exponent = int(input("""Enter the exponent: """).strip())
    result = power(base, abs(exponent))
    if exponent < 0:  # power() does not properly deal w/ negative exponents
        result = 1 / result
print(F"""{base} to the power of {exponent} is {result}""")
| 669 |
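# Cross-check against the built-in for a positive exponent:
assert power(2, 5) == pow(2, 5) == 32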
import json
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from transformers import OneFormerImageProcessor
from transformers.models.oneformer.image_processing_oneformer import binary_mask_to_rle
from transformers.models.oneformer.modeling_oneformer import OneFormerForUniversalSegmentationOutput
if is_vision_available():
from PIL import Image
def lowerCamelCase_ ( lowerCAmelCase: Any , lowerCAmelCase: Tuple="shi-labs/oneformer_demo" )-> Any:
with open(hf_hub_download(lowerCAmelCase , lowerCAmelCase , repo_type='dataset' ) , 'r' ) as f:
_snake_case : str = json.load(lowerCAmelCase )
_snake_case : List[str] = {}
_snake_case : Optional[Any] = []
_snake_case : Optional[Any] = []
for key, info in class_info.items():
_snake_case : Optional[int] = info['name']
class_names.append(info['name'] )
if info["isthing"]:
thing_ids.append(int(lowerCAmelCase ) )
_snake_case : List[str] = thing_ids
_snake_case : Optional[Any] = class_names
return metadata
class _lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def __init__( self : Union[str, Any] , UpperCamelCase : Tuple , UpperCamelCase : Any=7 , UpperCamelCase : Optional[Any]=3 , UpperCamelCase : Dict=30 , UpperCamelCase : int=4_00 , UpperCamelCase : List[str]=None , UpperCamelCase : Optional[Any]=True , UpperCamelCase : str=True , UpperCamelCase : Any=[0.5, 0.5, 0.5] , UpperCamelCase : int=[0.5, 0.5, 0.5] , UpperCamelCase : Dict=10 , UpperCamelCase : Dict=False , UpperCamelCase : Dict=2_55 , UpperCamelCase : Dict="shi-labs/oneformer_demo" , UpperCamelCase : Optional[int]="ade20k_panoptic.json" , UpperCamelCase : Tuple=10 , ):
'''simple docstring'''
_snake_case : Optional[Any] = parent
_snake_case : Union[str, Any] = batch_size
_snake_case : Tuple = num_channels
_snake_case : List[str] = min_resolution
_snake_case : List[str] = max_resolution
_snake_case : Optional[Any] = do_resize
_snake_case : Optional[Any] = {'shortest_edge': 32, 'longest_edge': 13_33} if size is None else size
_snake_case : Optional[int] = do_normalize
_snake_case : Any = image_mean
_snake_case : List[Any] = image_std
_snake_case : Any = class_info_file
_snake_case : List[str] = prepare_metadata(UpperCamelCase , UpperCamelCase )
_snake_case : Any = num_text
_snake_case : str = repo_path
# for the post_process_functions
_snake_case : Optional[Any] = 2
_snake_case : str = 10
_snake_case : Union[str, Any] = 10
_snake_case : List[Any] = 3
_snake_case : str = 4
_snake_case : List[Any] = num_labels
_snake_case : str = do_reduce_labels
_snake_case : List[str] = ignore_index
def UpperCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"num_labels": self.num_labels,
"do_reduce_labels": self.do_reduce_labels,
"ignore_index": self.ignore_index,
"class_info_file": self.class_info_file,
"metadata": self.metadata,
"num_text": self.num_text,
}
def UpperCamelCase_ ( self : Tuple , UpperCamelCase : Optional[int] , UpperCamelCase : List[Any]=False ):
'''simple docstring'''
if not batched:
_snake_case : Any = image_inputs[0]
if isinstance(UpperCamelCase , Image.Image ):
_snake_case , _snake_case : Any = image.size
else:
_snake_case , _snake_case : Any = image.shape[1], image.shape[2]
if w < h:
_snake_case : Union[str, Any] = int(self.size['shortest_edge'] * h / w )
_snake_case : Any = self.size['shortest_edge']
elif w > h:
_snake_case : int = self.size['shortest_edge']
_snake_case : Union[str, Any] = int(self.size['shortest_edge'] * w / h )
else:
_snake_case : Dict = self.size['shortest_edge']
_snake_case : Dict = self.size['shortest_edge']
else:
_snake_case : List[Any] = []
for image in image_inputs:
_snake_case , _snake_case : int = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
_snake_case : List[Any] = max(UpperCamelCase , key=lambda UpperCamelCase : item[0] )[0]
_snake_case : Optional[Any] = max(UpperCamelCase , key=lambda UpperCamelCase : item[1] )[1]
return expected_height, expected_width
def UpperCamelCase_ ( self : Tuple ):
'''simple docstring'''
return OneFormerForUniversalSegmentationOutput(
# +1 for null class
class_queries_logits=torch.randn((self.batch_size, self.num_queries, self.num_classes + 1) ) , masks_queries_logits=torch.randn((self.batch_size, self.num_queries, self.height, self.width) ) , )
@require_torch
@require_vision
class _lowerCAmelCase ( UpperCAmelCase_ , unittest.TestCase ):
'''simple docstring'''
a_ : Tuple =OneFormerImageProcessor if (is_vision_available() and is_torch_available()) else None
# only for test_image_processing_common.test_image_proc_to_json_string
a_ : Any =image_processing_class
def UpperCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
_snake_case : Dict = OneFormerImageProcessorTester(self )
@property
def UpperCamelCase_ ( self : Tuple ):
'''simple docstring'''
return self.image_processing_tester.prepare_image_processor_dict()
def UpperCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
_snake_case : int = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(UpperCamelCase , 'image_mean' ) )
self.assertTrue(hasattr(UpperCamelCase , 'image_std' ) )
self.assertTrue(hasattr(UpperCamelCase , 'do_normalize' ) )
self.assertTrue(hasattr(UpperCamelCase , 'do_resize' ) )
self.assertTrue(hasattr(UpperCamelCase , 'size' ) )
self.assertTrue(hasattr(UpperCamelCase , 'ignore_index' ) )
self.assertTrue(hasattr(UpperCamelCase , 'class_info_file' ) )
self.assertTrue(hasattr(UpperCamelCase , 'num_text' ) )
self.assertTrue(hasattr(UpperCamelCase , 'repo_path' ) )
self.assertTrue(hasattr(UpperCamelCase , 'metadata' ) )
self.assertTrue(hasattr(UpperCamelCase , 'do_reduce_labels' ) )
def UpperCamelCase_ ( self : List[str] ):
'''simple docstring'''
pass
def UpperCamelCase_ ( self : Any ):
'''simple docstring'''
_snake_case : List[Any] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
_snake_case : int = prepare_image_inputs(self.image_processing_tester , equal_resolution=UpperCamelCase )
for image in image_inputs:
self.assertIsInstance(UpperCamelCase , Image.Image )
# Test not batched input
_snake_case : Optional[Any] = image_processor(image_inputs[0] , ['semantic'] , return_tensors='pt' ).pixel_values
_snake_case , _snake_case : List[Any] = self.image_processing_tester.get_expected_values(UpperCamelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processing_tester.num_channels, expected_height, expected_width) , )
# Test batched
_snake_case , _snake_case : Optional[int] = self.image_processing_tester.get_expected_values(UpperCamelCase , batched=UpperCamelCase )
_snake_case : int = image_processor(
UpperCamelCase , ['semantic'] * len(UpperCamelCase ) , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processing_tester.batch_size,
self.image_processing_tester.num_channels,
expected_height,
expected_width,
) , )
def UpperCamelCase_ ( self : Tuple ):
'''simple docstring'''
_snake_case : Optional[int] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
_snake_case : int = prepare_image_inputs(self.image_processing_tester , equal_resolution=UpperCamelCase , numpify=UpperCamelCase )
for image in image_inputs:
self.assertIsInstance(UpperCamelCase , np.ndarray )
# Test not batched input
_snake_case : int = image_processor(image_inputs[0] , ['semantic'] , return_tensors='pt' ).pixel_values
_snake_case , _snake_case : Optional[int] = self.image_processing_tester.get_expected_values(UpperCamelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processing_tester.num_channels, expected_height, expected_width) , )
# Test batched
_snake_case , _snake_case : Union[str, Any] = self.image_processing_tester.get_expected_values(UpperCamelCase , batched=UpperCamelCase )
_snake_case : Optional[int] = image_processor(
UpperCamelCase , ['semantic'] * len(UpperCamelCase ) , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processing_tester.batch_size,
self.image_processing_tester.num_channels,
expected_height,
expected_width,
) , )
def UpperCamelCase_ ( self : int ):
'''simple docstring'''
_snake_case : Tuple = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
_snake_case : Union[str, Any] = prepare_image_inputs(self.image_processing_tester , equal_resolution=UpperCamelCase , torchify=UpperCamelCase )
for image in image_inputs:
self.assertIsInstance(UpperCamelCase , torch.Tensor )
# Test not batched input
_snake_case : Optional[int] = image_processor(image_inputs[0] , ['semantic'] , return_tensors='pt' ).pixel_values
_snake_case , _snake_case : int = self.image_processing_tester.get_expected_values(UpperCamelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processing_tester.num_channels, expected_height, expected_width) , )
# Test batched
_snake_case , _snake_case : int = self.image_processing_tester.get_expected_values(UpperCamelCase , batched=UpperCamelCase )
_snake_case : List[str] = image_processor(
UpperCamelCase , ['semantic'] * len(UpperCamelCase ) , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processing_tester.batch_size,
self.image_processing_tester.num_channels,
expected_height,
expected_width,
) , )
def UpperCamelCase_ ( self : Optional[Any] , UpperCamelCase : Tuple=False , UpperCamelCase : str=False , UpperCamelCase : Dict="np" ):
'''simple docstring'''
_snake_case : Optional[Any] = self.image_processing_class(**self.image_processor_dict )
# prepare image and target
_snake_case : List[str] = self.image_processing_tester.num_labels
_snake_case : Optional[int] = None
_snake_case : str = None
_snake_case : Union[str, Any] = prepare_image_inputs(self.image_processing_tester , equal_resolution=UpperCamelCase )
if with_segmentation_maps:
_snake_case : Optional[int] = num_labels
if is_instance_map:
_snake_case : Union[str, Any] = list(range(UpperCamelCase ) ) * 2
_snake_case : Tuple = dict(enumerate(UpperCamelCase ) )
_snake_case : Union[str, Any] = [
np.random.randint(0 , high * 2 , (img.size[1], img.size[0]) ).astype(np.uinta ) for img in image_inputs
]
if segmentation_type == "pil":
_snake_case : int = [Image.fromarray(UpperCamelCase ) for annotation in annotations]
_snake_case : List[Any] = image_processor(
UpperCamelCase , ['semantic'] * len(UpperCamelCase ) , UpperCamelCase , return_tensors='pt' , instance_id_to_semantic_id=UpperCamelCase , pad_and_return_pixel_mask=UpperCamelCase , )
return inputs
def UpperCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
pass
def UpperCamelCase_ ( self : Dict ):
'''simple docstring'''
def common(UpperCamelCase : Any=False , UpperCamelCase : int=None ):
_snake_case : Any = self.comm_get_image_processor_inputs(
with_segmentation_maps=UpperCamelCase , is_instance_map=UpperCamelCase , segmentation_type=UpperCamelCase )
_snake_case : Union[str, Any] = inputs['mask_labels']
_snake_case : Optional[int] = inputs['class_labels']
_snake_case : Optional[int] = inputs['pixel_values']
_snake_case : Optional[Any] = inputs['text_inputs']
# check the batch_size
for mask_label, class_label, text_input in zip(UpperCamelCase , UpperCamelCase , UpperCamelCase ):
self.assertEqual(mask_label.shape[0] , class_label.shape[0] )
# this ensure padding has happened
self.assertEqual(mask_label.shape[1:] , pixel_values.shape[2:] )
self.assertEqual(len(UpperCamelCase ) , self.image_processing_tester.num_text )
common()
common(is_instance_map=UpperCamelCase )
common(is_instance_map=UpperCamelCase , segmentation_type='pil' )
common(is_instance_map=UpperCamelCase , segmentation_type='pil' )
def UpperCamelCase_ ( self : Any ):
'''simple docstring'''
        fake_binary_mask = np.zeros((20, 50) )
        fake_binary_mask[0, 20:] = 1
        fake_binary_mask[1, :15] = 1
        fake_binary_mask[5, :10] = 1
        rle = binary_mask_to_rle(fake_binary_mask )
        self.assertEqual(len(rle ) , 4 )
        self.assertEqual(rle[0] , 21 )
        self.assertEqual(rle[1] , 45 )
def UpperCamelCase_ ( self : Any ):
'''simple docstring'''
        feature_extractor = self.image_processing_class(
            num_labels=self.image_processing_tester.num_classes , max_seq_length=77 , task_seq_length=77 , class_info_file='ade20k_panoptic.json' , num_text=self.image_processing_tester.num_text , repo_path='shi-labs/oneformer_demo' , )
        outputs = self.image_processing_tester.get_fake_oneformer_outputs()
        segmentation = feature_extractor.post_process_semantic_segmentation(outputs )
        self.assertEqual(len(segmentation ) , self.image_processing_tester.batch_size )
        self.assertEqual(
            segmentation[0].shape , (
                self.image_processing_tester.height,
                self.image_processing_tester.width,
            ) , )
        target_sizes = [(1, 4) for i in range(self.image_processing_tester.batch_size )]
        segmentation = feature_extractor.post_process_semantic_segmentation(outputs , target_sizes=target_sizes )
        self.assertEqual(segmentation[0].shape , target_sizes[0] )
def UpperCamelCase_ ( self : List[str] ):
'''simple docstring'''
_snake_case : Any = self.image_processing_class(
num_labels=self.image_processing_tester.num_classes , max_seq_length=77 , task_seq_length=77 , class_info_file='ade20k_panoptic.json' , num_text=self.image_processing_tester.num_text , repo_path='shi-labs/oneformer_demo' , )
_snake_case : Optional[int] = self.image_processing_tester.get_fake_oneformer_outputs()
_snake_case : int = image_processor.post_process_instance_segmentation(UpperCamelCase , threshold=0 )
self.assertTrue(len(UpperCamelCase ) == self.image_processing_tester.batch_size )
for el in segmentation:
self.assertTrue('segmentation' in el )
self.assertTrue('segments_info' in el )
self.assertEqual(type(el['segments_info'] ) , UpperCamelCase )
self.assertEqual(
el['segmentation'].shape , (self.image_processing_tester.height, self.image_processing_tester.width) )
def UpperCamelCase_ ( self : str ):
'''simple docstring'''
_snake_case : str = self.image_processing_class(
num_labels=self.image_processing_tester.num_classes , max_seq_length=77 , task_seq_length=77 , class_info_file='ade20k_panoptic.json' , num_text=self.image_processing_tester.num_text , repo_path='shi-labs/oneformer_demo' , )
_snake_case : Optional[Any] = self.image_processing_tester.get_fake_oneformer_outputs()
_snake_case : Any = image_processor.post_process_panoptic_segmentation(UpperCamelCase , threshold=0 )
self.assertTrue(len(UpperCamelCase ) == self.image_processing_tester.batch_size )
for el in segmentation:
self.assertTrue('segmentation' in el )
self.assertTrue('segments_info' in el )
self.assertEqual(type(el['segments_info'] ) , UpperCamelCase )
self.assertEqual(
el['segmentation'].shape , (self.image_processing_tester.height, self.image_processing_tester.width) )
| 669 | 1 |
import collections
import inspect
import unittest
from transformers import SwinvaConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import SwinvaForImageClassification, SwinvaForMaskedImageModeling, SwinvaModel
from transformers.models.swinva.modeling_swinva import SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class _lowerCAmelCase :
'''simple docstring'''
def __init__( self : Tuple , UpperCamelCase : str , UpperCamelCase : Optional[int]=13 , UpperCamelCase : int=32 , UpperCamelCase : Any=2 , UpperCamelCase : Dict=3 , UpperCamelCase : List[Any]=16 , UpperCamelCase : Dict=[1, 2, 1] , UpperCamelCase : Tuple=[2, 2, 4] , UpperCamelCase : List[str]=2 , UpperCamelCase : Optional[Any]=2.0 , UpperCamelCase : Optional[Any]=True , UpperCamelCase : List[str]=0.0 , UpperCamelCase : str=0.0 , UpperCamelCase : Tuple=0.1 , UpperCamelCase : Optional[int]="gelu" , UpperCamelCase : Tuple=False , UpperCamelCase : Any=True , UpperCamelCase : Optional[int]=0.02 , UpperCamelCase : List[str]=1e-5 , UpperCamelCase : Optional[int]=True , UpperCamelCase : Optional[int]=None , UpperCamelCase : int=True , UpperCamelCase : Any=10 , UpperCamelCase : int=8 , ):
'''simple docstring'''
_snake_case : List[str] = parent
_snake_case : Dict = batch_size
_snake_case : Dict = image_size
_snake_case : List[Any] = patch_size
_snake_case : Union[str, Any] = num_channels
_snake_case : str = embed_dim
_snake_case : Dict = depths
_snake_case : Any = num_heads
_snake_case : List[Any] = window_size
_snake_case : Optional[Any] = mlp_ratio
_snake_case : Optional[Any] = qkv_bias
_snake_case : Optional[int] = hidden_dropout_prob
_snake_case : Optional[int] = attention_probs_dropout_prob
_snake_case : Union[str, Any] = drop_path_rate
_snake_case : Optional[Any] = hidden_act
_snake_case : List[str] = use_absolute_embeddings
_snake_case : Union[str, Any] = patch_norm
_snake_case : Dict = layer_norm_eps
_snake_case : Tuple = initializer_range
_snake_case : Dict = is_training
_snake_case : List[str] = scope
_snake_case : Optional[Any] = use_labels
_snake_case : List[Any] = type_sequence_label_size
_snake_case : int = encoder_stride
def UpperCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
_snake_case : Union[str, Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_snake_case : str = None
if self.use_labels:
_snake_case : Union[str, Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_snake_case : Tuple = self.get_config()
return config, pixel_values, labels
def UpperCamelCase_ ( self : List[str] ):
'''simple docstring'''
return SwinvaConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , )
def UpperCamelCase_ ( self : Tuple , UpperCamelCase : Dict , UpperCamelCase : int , UpperCamelCase : Tuple ):
'''simple docstring'''
_snake_case : List[Any] = SwinvaModel(config=UpperCamelCase )
model.to(UpperCamelCase )
model.eval()
_snake_case : Optional[Any] = model(UpperCamelCase )
_snake_case : Any = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1))
_snake_case : int = int(config.embed_dim * 2 ** (len(config.depths ) - 1) )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) )
def UpperCamelCase_ ( self : Union[str, Any] , UpperCamelCase : Optional[int] , UpperCamelCase : int , UpperCamelCase : Any ):
'''simple docstring'''
_snake_case : Any = SwinvaForMaskedImageModeling(config=UpperCamelCase )
model.to(UpperCamelCase )
model.eval()
_snake_case : List[Any] = model(UpperCamelCase )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
_snake_case : Tuple = 1
_snake_case : Optional[int] = SwinvaForMaskedImageModeling(UpperCamelCase )
model.to(UpperCamelCase )
model.eval()
_snake_case : str = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
_snake_case : str = model(UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, 1, self.image_size, self.image_size) )
def UpperCamelCase_ ( self : Dict , UpperCamelCase : str , UpperCamelCase : Optional[Any] , UpperCamelCase : Dict ):
'''simple docstring'''
_snake_case : List[str] = self.type_sequence_label_size
_snake_case : Tuple = SwinvaForImageClassification(UpperCamelCase )
model.to(UpperCamelCase )
model.eval()
_snake_case : Union[str, Any] = model(UpperCamelCase , labels=UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def UpperCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
_snake_case : Tuple = self.prepare_config_and_inputs()
_snake_case , _snake_case , _snake_case : Any = config_and_inputs
_snake_case : Tuple = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class _lowerCAmelCase ( UpperCAmelCase_ , UpperCAmelCase_ , unittest.TestCase ):
'''simple docstring'''
a_ : str =(
(SwinvaModel, SwinvaForImageClassification, SwinvaForMaskedImageModeling) if is_torch_available() else ()
)
a_ : Optional[Any] =(
{"""feature-extraction""": SwinvaModel, """image-classification""": SwinvaForImageClassification}
if is_torch_available()
else {}
)
a_ : Optional[Any] =False
a_ : int =False
a_ : Optional[int] =False
a_ : int =False
def UpperCamelCase_ ( self : str ):
'''simple docstring'''
_snake_case : Optional[Any] = SwinvaModelTester(self )
_snake_case : List[Any] = ConfigTester(self , config_class=UpperCamelCase , embed_dim=37 )
def UpperCamelCase_ ( self : Tuple ):
'''simple docstring'''
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def UpperCamelCase_ ( self : int ):
'''simple docstring'''
_snake_case : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCamelCase )
@unittest.skip(reason='Got `CUDA error: misaligned address` with PyTorch 2.0.0.' )
def UpperCamelCase_ ( self : Tuple ):
'''simple docstring'''
pass
@unittest.skip(reason='Swinv2 does not use inputs_embeds' )
def UpperCamelCase_ ( self : Dict ):
'''simple docstring'''
pass
def UpperCamelCase_ ( self : Tuple ):
'''simple docstring'''
_snake_case , _snake_case : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_snake_case : int = model_class(UpperCamelCase )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
_snake_case : Dict = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(UpperCamelCase , nn.Linear ) )
def UpperCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
_snake_case , _snake_case : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_snake_case : Dict = model_class(UpperCamelCase )
_snake_case : Dict = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_snake_case : str = [*signature.parameters.keys()]
_snake_case : List[str] = ['pixel_values']
self.assertListEqual(arg_names[:1] , UpperCamelCase )
def UpperCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
_snake_case , _snake_case : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
_snake_case : List[str] = True
for model_class in self.all_model_classes:
_snake_case : Union[str, Any] = True
_snake_case : Union[str, Any] = False
_snake_case : Dict = True
_snake_case : List[str] = model_class(UpperCamelCase )
model.to(UpperCamelCase )
model.eval()
with torch.no_grad():
_snake_case : Any = model(**self._prepare_for_class(UpperCamelCase , UpperCamelCase ) )
_snake_case : Dict = outputs.attentions
_snake_case : Dict = len(self.model_tester.depths )
self.assertEqual(len(UpperCamelCase ) , UpperCamelCase )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
_snake_case : Optional[int] = True
_snake_case : List[str] = config.window_size**2
_snake_case : List[str] = model_class(UpperCamelCase )
model.to(UpperCamelCase )
model.eval()
with torch.no_grad():
_snake_case : str = model(**self._prepare_for_class(UpperCamelCase , UpperCamelCase ) )
_snake_case : int = outputs.attentions
self.assertEqual(len(UpperCamelCase ) , UpperCamelCase )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_heads[0], window_size_squared, window_size_squared] , )
_snake_case : Dict = len(UpperCamelCase )
# Check attention is always last and order is fine
_snake_case : int = True
_snake_case : Dict = True
_snake_case : Optional[int] = model_class(UpperCamelCase )
model.to(UpperCamelCase )
model.eval()
with torch.no_grad():
_snake_case : Union[str, Any] = model(**self._prepare_for_class(UpperCamelCase , UpperCamelCase ) )
if hasattr(self.model_tester , 'num_hidden_states_types' ):
_snake_case : str = self.model_tester.num_hidden_states_types
else:
# also another +1 for reshaped_hidden_states
_snake_case : List[Any] = 2
self.assertEqual(out_len + added_hidden_states , len(UpperCamelCase ) )
_snake_case : Union[str, Any] = outputs.attentions
self.assertEqual(len(UpperCamelCase ) , UpperCamelCase )
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_heads[0], window_size_squared, window_size_squared] , )
def UpperCamelCase_ ( self : Optional[int] , UpperCamelCase : Optional[int] , UpperCamelCase : str , UpperCamelCase : List[Any] , UpperCamelCase : int ):
'''simple docstring'''
_snake_case : Optional[int] = model_class(UpperCamelCase )
model.to(UpperCamelCase )
model.eval()
with torch.no_grad():
_snake_case : Union[str, Any] = model(**self._prepare_for_class(UpperCamelCase , UpperCamelCase ) )
_snake_case : Optional[int] = outputs.hidden_states
_snake_case : Optional[int] = getattr(
self.model_tester , 'expected_num_hidden_layers' , len(self.model_tester.depths ) + 1 )
self.assertEqual(len(UpperCamelCase ) , UpperCamelCase )
# Swinv2 has a different seq_length
_snake_case : List[Any] = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
_snake_case : Union[str, Any] = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
_snake_case : List[str] = outputs.reshaped_hidden_states
self.assertEqual(len(UpperCamelCase ) , UpperCamelCase )
_snake_case , _snake_case , _snake_case , _snake_case : Optional[int] = reshaped_hidden_states[0].shape
_snake_case : List[str] = (
reshaped_hidden_states[0].view(UpperCamelCase , UpperCamelCase , height * width ).permute(0 , 2 , 1 )
)
self.assertListEqual(
list(reshaped_hidden_states.shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
def UpperCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
_snake_case , _snake_case : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
_snake_case : Tuple = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
for model_class in self.all_model_classes:
_snake_case : Any = True
self.check_hidden_states_output(UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_snake_case : Union[str, Any] = True
self.check_hidden_states_output(UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase )
def UpperCamelCase_ ( self : str ):
'''simple docstring'''
_snake_case , _snake_case : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
_snake_case : Tuple = 3
_snake_case : List[str] = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
_snake_case : str = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
_snake_case : Union[str, Any] = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
_snake_case : Dict = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
for model_class in self.all_model_classes:
_snake_case : Tuple = True
self.check_hidden_states_output(UpperCamelCase , UpperCamelCase , UpperCamelCase , (padded_height, padded_width) )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_snake_case : List[Any] = True
self.check_hidden_states_output(UpperCamelCase , UpperCamelCase , UpperCamelCase , (padded_height, padded_width) )
def UpperCamelCase_ ( self : List[str] ):
'''simple docstring'''
_snake_case : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*UpperCamelCase )
def UpperCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
_snake_case : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*UpperCamelCase )
@slow
def UpperCamelCase_ ( self : int ):
'''simple docstring'''
for model_name in SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_snake_case : Tuple = SwinvaModel.from_pretrained(UpperCamelCase )
self.assertIsNotNone(UpperCamelCase )
def UpperCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
_snake_case , _snake_case : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
_snake_case : Tuple = _config_zero_init(UpperCamelCase )
for model_class in self.all_model_classes:
_snake_case : Union[str, Any] = model_class(config=UpperCamelCase )
for name, param in model.named_parameters():
if "embeddings" not in name and "logit_scale" not in name and param.requires_grad:
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=f"""Parameter {name} of model {model_class} seems not properly initialized""" , )
@require_vision
@require_torch
class _lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def UpperCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
return (
AutoImageProcessor.from_pretrained('microsoft/swinv2-tiny-patch4-window8-256' )
if is_vision_available()
else None
)
@slow
def UpperCamelCase_ ( self : List[str] ):
'''simple docstring'''
_snake_case : Union[str, Any] = SwinvaForImageClassification.from_pretrained('microsoft/swinv2-tiny-patch4-window8-256' ).to(
UpperCamelCase )
_snake_case : Any = self.default_image_processor
_snake_case : Dict = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
_snake_case : List[str] = image_processor(images=UpperCamelCase , return_tensors='pt' ).to(UpperCamelCase )
# forward pass
with torch.no_grad():
_snake_case : Any = model(**UpperCamelCase )
# verify the logits
_snake_case : str = torch.Size((1, 10_00) )
self.assertEqual(outputs.logits.shape , UpperCamelCase )
_snake_case : List[Any] = torch.tensor([-0.39_47, -0.43_06, 0.00_26] ).to(UpperCamelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , UpperCamelCase , atol=1e-4 ) )
| 669 |
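# Sanity check of the expected-sequence-length formula used in the Swin tests
# above, ((image_size // patch_size) ** 2) // 4 ** (num_stages - 1), with the
# tester defaults (image_size=32, patch_size=2, three stages):
assert ((32 // 2) ** 2) // 4 ** (3 - 1) == 16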
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.utils import ComputeEnvironment
from .cluster import get_cluster_input
from .config_args import cache_dir, default_config_file, default_yaml_config_file, load_config_from_file # noqa: F401
from .config_utils import _ask_field, _ask_options, _convert_compute_environment # noqa: F401
from .sagemaker import get_sagemaker_input
description = """Launches a series of prompts to create and save a `default_config.yaml` configuration file for your training system. Should always be ran first on your machine"""
def get_user_input():
    compute_environment = _ask_options(
        'In which compute environment are you running?' , ['This machine', 'AWS (Amazon SageMaker)'] , _convert_compute_environment , )
    if compute_environment == ComputeEnvironment.AMAZON_SAGEMAKER:
        config = get_sagemaker_input()
    else:
        config = get_cluster_input()
    return config
def config_command_parser( subparsers=None ):
    if subparsers is not None:
        parser = subparsers.add_parser('config' , description=description )
    else:
        parser = argparse.ArgumentParser('Accelerate config command' , description=description )
    parser.add_argument(
        '--config_file' , default=None , help=(
            'The path to use to store the config file. Will default to a file named default_config.yaml in the cache '
            'location, which is the content of the environment `HF_HOME` suffixed with \'accelerate\', or if you don\'t have '
            'such an environment variable, your cache directory (\'~/.cache\' or the content of `XDG_CACHE_HOME`) suffixed '
            'with \'huggingface\'.'
        ) , )
    if subparsers is not None:
        parser.set_defaults(func=config_command )
    return parser
def config_command( args ):
    config = get_user_input()
    if args.config_file is not None:
        config_file = args.config_file
    else:
        if not os.path.isdir(cache_dir ):
            os.makedirs(cache_dir )
        config_file = default_yaml_config_file
    if config_file.endswith('.json' ):
        config.to_json_file(config_file )
    else:
        config.to_yaml_file(config_file )
    print(F"""accelerate configuration saved at {config_file}""" )
def main():
    parser = config_command_parser()
    args = parser.parse_args()
    config_command(args )
if __name__ == "__main__":
main()
| 669 | 1 |
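# Typical CLI usage once accelerate is installed (the `accelerate config`
# console entry point dispatches to main() above):
#   accelerate config --config_file ./default_config.yaml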
import fire
from transformers import AutoConfig, AutoModelForSeq2SeqLM, AutoTokenizer
def save_randomly_initialized_version( config_name: str , save_dir: str , **config_kwargs ):
    config = AutoConfig.from_pretrained(config_name , **config_kwargs )
    model = AutoModelForSeq2SeqLM.from_config(config )
    model.save_pretrained(save_dir )
    AutoTokenizer.from_pretrained(config_name ).save_pretrained(save_dir )
    return model
if __name__ == "__main__":
fire.Fire(save_randomly_initialized_version)
| 669 |
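# Example invocation via python-fire (hypothetical script name and output dir;
# extra --flags are forwarded to AutoConfig as config_kwargs):
#   python save_randomly_initialized_version.py t5-small ./t5-small-random --d_model=128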
# Function to print upper half of diamond (pyramid)
def floyd( n ):
    for i in range(0 , n ):
        for _ in range(0 , n - i - 1 ):  # printing spaces
            print(' ' , end='' )
        for _ in range(0 , i + 1 ):  # printing stars
            print('* ' , end='' )
        print()
def reverse_floyd( n ):
    for i in range(n , 0 , -1 ):
        for _ in range(i , 0 , -1 ):  # printing stars
            print('* ' , end='' )
        print()
        for _ in range(n - i + 1 , 0 , -1 ):  # printing spaces
            print(' ' , end='' )
def pretty_print( n ):
    if n <= 0:
        print(' ... .... nothing printing :(' )
        return
    floyd(n )  # upper half
    reverse_floyd(n )  # lower half
if __name__ == "__main__":
    print(r"""| /\ | |- | |- |--| |\ /| |-""")
    print(r"""|/ \| |- |_ |_ |__| | \/ | |_""")
    K = 1
    while K:
        user_number = int(input("""enter the number and , and see the magic : """))
        print()
        pretty_print(user_number)
        K = int(input("""press 0 to exit... and 1 to continue..."""))
    print("""Good Bye...""")
| 669 | 1 |
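# For user_number = 3 the routine above prints a diamond (sketch, trailing
# spaces omitted):
#   *
#  * *
# * * *
# * * *
#  * *
#   *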
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.activations import gelu_new, gelu_python, get_activation
@require_torch
class _lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
    def test_gelu_versions( self ):
        '''simple docstring'''
        x = torch.tensor([-1_00, -1, -0.1, 0, 0.1, 1.0, 1_00] )
        torch_builtin = get_activation('gelu' )
        self.assertTrue(torch.allclose(gelu_python(x ) , torch_builtin(x ) ) )
        self.assertFalse(torch.allclose(gelu_python(x ) , gelu_new(x ) ) )
    def test_gelu_10( self ):
        '''simple docstring'''
        x = torch.tensor([-1_00, -1, -0.1, 0, 0.1, 1.0, 1_00] )
        torch_builtin = get_activation('gelu' )
        geluaa = get_activation('gelu_10' )
        y_gelu = torch_builtin(x )
        y_gelu_aa = geluaa(x )
        clipped_mask = torch.where(y_gelu_aa < 10.0 , 1 , 0 )
        self.assertTrue(torch.max(y_gelu_aa ).item() == 10.0 )
        self.assertTrue(torch.allclose(y_gelu * clipped_mask , y_gelu_aa * clipped_mask ) )
    def test_get_activation( self ):
'''simple docstring'''
get_activation('gelu' )
get_activation('gelu_10' )
get_activation('gelu_fast' )
get_activation('gelu_new' )
get_activation('gelu_python' )
get_activation('gelu_pytorch_tanh' )
get_activation('linear' )
get_activation('mish' )
get_activation('quick_gelu' )
get_activation('relu' )
get_activation('sigmoid' )
get_activation('silu' )
get_activation('swish' )
get_activation('tanh' )
        with self.assertRaises(KeyError ):
            get_activation('bogus' )
        with self.assertRaises(KeyError ):
            get_activation(None )
    def test_activations_are_distinct_objects( self ):
        '''simple docstring'''
        acta = get_activation('gelu' )
        acta.a = 1
        actb = get_activation('gelu' )
        self.assertEqual(acta.a , 1 )
        with self.assertRaises(AttributeError ):
            _ = actb.a
| 669 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase_ = logging.get_logger(__name__)
lowerCAmelCase_ = {
"""MIT/ast-finetuned-audioset-10-10-0.4593""": (
"""https://huggingface.co/MIT/ast-finetuned-audioset-10-10-0.4593/resolve/main/config.json"""
),
}
class ASTConfig(PretrainedConfig):
    '''simple docstring'''

    model_type = "audio-spectrogram-transformer"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        patch_size=16,
        qkv_bias=True,
        frequency_stride=10,
        time_stride=10,
        max_length=1024,
        num_mel_bins=128,
        **kwargs,
    ):
        '''simple docstring'''
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.patch_size = patch_size
        self.qkv_bias = qkv_bias
        self.frequency_stride = frequency_stride
        self.time_stride = time_stride
        self.max_length = max_length
        self.num_mel_bins = num_mel_bins
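# Minimal usage sketch (names as reconstructed above; the checkpoint id below
# comes from the pretrained map at the top of this file):
# config = ASTConfig(max_length=1024, num_mel_bins=128)
# print(config.hidden_size)  # 768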
| 669 | 1 |
from jiwer import compute_measures
import datasets
lowerCAmelCase_ = """\
@inproceedings{inproceedings,
author = {Morris, Andrew and Maier, Viktoria and Green, Phil},
year = {2004},
month = {01},
pages = {},
title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}
}
"""
lowerCAmelCase_ = """\
Word error rate (WER) is a common metric of the performance of an automatic speech recognition system.
The general difficulty of measuring performance lies in the fact that the recognized word sequence can have a different length from the reference word sequence (supposedly the correct one). The WER is derived from the Levenshtein distance, working at the word level instead of the phoneme level. The WER is a valuable tool for comparing different systems as well as for evaluating improvements within one system. This kind of measurement, however, provides no details on the nature of translation errors and further work is therefore required to identify the main source(s) of error and to focus any research effort.
This problem is solved by first aligning the recognized word sequence with the reference (spoken) word sequence using dynamic string alignment. The relationship between recognition quality and error rate has also been studied through a power law linking perplexity and word error rate.
Word error rate can then be computed as:
WER = (S + D + I) / N = (S + D + I) / (S + D + C)
where
S is the number of substitutions,
D is the number of deletions,
I is the number of insertions,
C is the number of correct words,
N is the number of words in the reference (N=S+D+C).
This value indicates the average number of errors per reference word. The lower the value, the better the
performance of the ASR system with a WER of 0 being a perfect score.
"""
lowerCAmelCase_ = """
Compute WER score of transcribed segments against references.
Args:
references: List of references for each speech input.
predictions: List of transcriptions to score.
concatenate_texts (bool, default=False): Whether to concatenate all input texts or compute WER iteratively.
Returns:
(float): the word error rate
Examples:
>>> predictions = [\"this is the prediction\", \"there is an other sample\"]
>>> references = [\"this is the reference\", \"there is another one\"]
>>> wer = datasets.load_metric(\"wer\")
>>> wer_score = wer.compute(predictions=predictions, references=references)
>>> print(wer_score)
0.5
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _lowerCAmelCase ( datasets.Metric ):
'''simple docstring'''
    def _info(self):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Value('string' , id='sequence' ),
'references': datasets.Value('string' , id='sequence' ),
} ) , codebase_urls=['https://github.com/jitsi/jiwer/'] , reference_urls=[
'https://en.wikipedia.org/wiki/Word_error_rate',
] , )
    def _compute(self, predictions=None, references=None, concatenate_texts=False):
        '''simple docstring'''
        if concatenate_texts:
            return compute_measures(references, predictions)["wer"]
        else:
            incorrect = 0
            total = 0
            for prediction, reference in zip(predictions, references):
                measures = compute_measures(reference, prediction)
                incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"]
                total += measures["substitutions"] + measures["deletions"] + measures["hits"]
            return incorrect / total
| 669 |
import math
import random
from typing import Any
from .hill_climbing import SearchProblem
def simulated_annealing(
    search_prob,
    find_max: bool = True,
    max_x: float = math.inf,
    min_x: float = -math.inf,
    max_y: float = math.inf,
    min_y: float = -math.inf,
    visualization: bool = False,
    start_temperate: float = 100,
    rate_of_decrease: float = 0.01,
    threshold_temp: float = 1,
) -> Any:
    search_end = False
    current_state = search_prob
    current_temp = start_temperate
    scores = []
    iterations = 0
    best_state = None
    while not search_end:
        current_score = current_state.score()
        if best_state is None or current_score > best_state.score():
            best_state = current_state
        scores.append(current_score)
        iterations += 1
        next_state = None
        neighbors = current_state.get_neighbors()
        while (
            next_state is None and neighbors
        ):  # till we do not find a neighbor that we can move to
            index = random.randint(0, len(neighbors) - 1)  # picking a random neighbor
            picked_neighbor = neighbors.pop(index)
            change = picked_neighbor.score() - current_score
            if (
                picked_neighbor.x > max_x
                or picked_neighbor.x < min_x
                or picked_neighbor.y > max_y
                or picked_neighbor.y < min_y
            ):
                continue  # neighbor outside our bounds
            if not find_max:
                change = change * -1  # in case we are finding minimum
            if change > 0:  # improves the solution
                next_state = picked_neighbor
            else:
                probability = (math.e) ** (
                    change / current_temp
                )  # probability generation function
                if random.random() < probability:  # random number within probability
                    next_state = picked_neighbor
        current_temp = current_temp - (current_temp * rate_of_decrease)
        if current_temp < threshold_temp or next_state is None:
            # temperature below threshold, or could not find a suitable neighbor
            search_end = True
        else:
            current_state = next_state
    if visualization:
        from matplotlib import pyplot as plt

        plt.plot(range(iterations), scores)
        plt.xlabel('Iterations')
        plt.ylabel('Function values')
        plt.show()
    return best_state


if __name__ == "__main__":

    def test_f1(x, y):
        return (x**2) + (y**2)

    # starting the problem with initial coordinates (12, 47)
    prob = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_f1)
    local_min = simulated_annealing(
        prob, find_max=False, max_x=100, min_x=5, max_y=50, min_y=-5, visualization=True
    )
    print(
        """The minimum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 """
        F"""and 50 > y > - 5 found via simulated annealing: {local_min.score()}"""
    )
    # starting the problem with initial coordinates (12, 47)
    prob = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_f1)
    local_max = simulated_annealing(
        prob, find_max=True, max_x=100, min_x=5, max_y=50, min_y=-5, visualization=True
    )
    print(
        """The maximum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 """
        F"""and 50 > y > - 5 found via simulated annealing: {local_max.score()}"""
    )

    def test_f2(x, y):
        return (3 * x**2) - (6 * y)

    prob = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_f2)
    local_min = simulated_annealing(prob, find_max=False, visualization=True)
    print(
        """The minimum score for f(x, y) = 3*x^2 - 6*y found via simulated annealing: """
        F"""{local_min.score()}"""
    )
    prob = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_f2)
    local_max = simulated_annealing(prob, find_max=True, visualization=True)
    print(
        """The maximum score for f(x, y) = 3*x^2 - 6*y found via simulated annealing: """
        F"""{local_max.score()}"""
)
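# Numeric illustration of the acceptance rule above (assumed values): a
# worsening move with change = -2.0 is kept with probability e**(change / temp),
# so it is likely early on and rare once the schedule has cooled:
#   temp = 100 -> e**(-2 / 100) ~ 0.980
#   temp = 10  -> e**(-2 / 10)  ~ 0.819
#   temp = 1   -> e**(-2 / 1)   ~ 0.135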
| 669 | 1 |
import collections
import json
import os
import re
from typing import TYPE_CHECKING, List, Optional, Tuple
import numpy as np
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)
lowerCAmelCase_ = {"""vocab_file""": """vocab.txt""", """emoji_file""": """emoji.json"""}
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""abeja/gpt-neox-japanese-2.7b""": """https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/vocab.txt""",
},
"""emoji_file""": {
"""abeja/gpt-neox-japanese-2.7b""": """https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/emoji.json""",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""abeja/gpt-neox-japanese-2.7b""": 2048,
}
def load_vocab_and_emoji(vocab_file, emoji_file):
    with open(emoji_file, 'r', encoding='utf-8') as f:
        emoji = json.loads(f.read())

    vocab = collections.OrderedDict()
    raw_vocab = collections.OrderedDict()
    ids_to_tokens = collections.OrderedDict()
    with open(vocab_file, 'r', encoding='utf-8') as f:
        token = f.readlines()
    token = [[t.rstrip('\n')] if (t == ',' or ',' not in t) else t.rstrip('\n').split(',') for t in token]
    for idx, b in enumerate(token):
        ids_to_tokens[idx] = b
        raw_vocab[','.join(b)] = idx
        for wd in b:
            vocab[wd] = idx
    return vocab, raw_vocab, ids_to_tokens, emoji
class GPTNeoXJapaneseTokenizer(PreTrainedTokenizer):
'''simple docstring'''
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["""input_ids""", """attention_mask"""]
    def __init__(
        self,
        vocab_file,
        emoji_file,
        unk_token="<|endoftext|>",
        pad_token="<|endoftext|>",
        bos_token="<|startoftext|>",
        eos_token="<|endoftext|>",
        do_clean_text=False,
        **kwargs,
    ):
        '''simple docstring'''
        super().__init__(
            unk_token=unk_token, pad_token=pad_token, bos_token=bos_token, eos_token=eos_token, do_clean_text=do_clean_text, **kwargs, )
        if not os.path.isfile(vocab_file):
            raise ValueError(
                f"""Can't find a vocabulary file at path '{vocab_file}'. To load the vocabulary from a Google pretrained"""
                ' model use `tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`' )
        if not os.path.isfile(emoji_file):
            raise ValueError(
                f"""Can't find an emoji file at path '{emoji_file}'. To load the emoji information from a Google"""
                ' pretrained model use `tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`' )
        self.do_clean_text = do_clean_text
        self.vocab, self.raw_vocab, self.ids_to_tokens, self.emoji = load_vocab_and_emoji(vocab_file, emoji_file)
        self.subword_tokenizer = SubWordJapaneseTokenizer(
            vocab=self.vocab, ids_to_tokens=self.ids_to_tokens, emoji=self.emoji )
@property
def UpperCamelCase_ ( self : List[Any] ):
'''simple docstring'''
return len(self.raw_vocab )
def UpperCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
return dict(self.raw_vocab , **self.added_tokens_encoder )
def UpperCamelCase_ ( self : int , UpperCamelCase : List[str] ):
'''simple docstring'''
return self.subword_tokenizer.tokenize(UpperCamelCase , clean=self.do_clean_text )
def UpperCamelCase_ ( self : List[str] , UpperCamelCase : Optional[int] ):
'''simple docstring'''
return self.vocab.get(UpperCamelCase , self.vocab.get(self.unk_token ) )
def UpperCamelCase_ ( self : Union[str, Any] , UpperCamelCase : int ):
'''simple docstring'''
return self.subword_tokenizer.convert_id_to_token(UpperCamelCase )
def UpperCamelCase_ ( self : List[Any] , UpperCamelCase : Union[str, Any] ):
'''simple docstring'''
_snake_case : List[str] = ''.join(UpperCamelCase ).strip()
return out_string
def UpperCamelCase_ ( self : Union[str, Any] , UpperCamelCase : "Conversation" ):
'''simple docstring'''
_snake_case : List[Any] = []
for is_user, text in conversation.iter_texts():
input_ids.extend(self.encode(UpperCamelCase , add_special_tokens=UpperCamelCase ) + [self.eos_token_id] )
if len(UpperCamelCase ) > self.model_max_length:
_snake_case : str = input_ids[-self.model_max_length :]
return input_ids
def UpperCamelCase_ ( self : Union[str, Any] , UpperCamelCase : str , UpperCamelCase : Optional[str] = None ):
'''simple docstring'''
_snake_case : Tuple = 0
if os.path.isdir(UpperCamelCase ):
_snake_case : Tuple = os.path.join(
UpperCamelCase , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
_snake_case : Any = os.path.join(
UpperCamelCase , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['emoji_file'] )
else:
_snake_case : str = (
(filename_prefix + '-' if filename_prefix else '') + save_directory + VOCAB_FILES_NAMES['vocab_file']
)
_snake_case : Dict = (
(filename_prefix + '-' if filename_prefix else '') + save_directory + VOCAB_FILES_NAMES['emoji_file']
)
with open(UpperCamelCase , 'w' , encoding='utf-8' ) as writer:
for token_index, token in self.ids_to_tokens.items():
if index != token_index:
logger.warning(
f"""Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."""
' Please check that the vocabulary is not corrupted!' )
_snake_case : int = token_index
writer.write(','.join(UpperCamelCase ) + '\n' )
index += 1
with open(UpperCamelCase , 'w' , encoding='utf-8' ) as writer:
json.dump(self.emoji , UpperCamelCase )
return vocab_file, emoji_file
class SubWordJapaneseTokenizer(object):
'''simple docstring'''
    def __init__(self, vocab, ids_to_tokens, emoji):
        '''simple docstring'''
        self.vocab = vocab  # same as swe
        self.ids_to_tokens = ids_to_tokens  # same as bpe
        self.emoji = emoji
        self.maxlen = np.max([len(w) for w in self.vocab.keys()])
_snake_case : Dict = re.compile(R'(https?|ftp)(:\/\/[-_\.!~*\'()a-zA-Z0-9;\/?:\@&=\+$,%#]+)' )
_snake_case : Optional[int] = re.compile(R'[A-Za-z0-9\._+]*@[\-_0-9A-Za-z]+(\.[A-Za-z]+)*' )
_snake_case : List[str] = re.compile(R'[\(]{0,1}[0-9]{2,4}[\)\-\(]{0,1}[0-9]{2,4}[\)\-]{0,1}[0-9]{3,4}' )
_snake_case : Optional[int] = re.compile(
R'([12]\d{3}[/\-年])*(0?[1-9]|1[0-2])[/\-月]((0?[1-9]|[12][0-9]|3[01])日?)*(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*' )
_snake_case : List[Any] = re.compile(
R'(明治|大正|昭和|平成|令和|㍾|㍽|㍼|㍻|\u32ff)\d{1,2}年(0?[1-9]|1[0-2])月(0?[1-9]|[12][0-9]|3[01])日(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*' )
_snake_case : str = re.compile(
R'((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*億)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*万)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*千)*(0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*(千円|万円|千万円|円|千ドル|万ドル|千万ドル|ドル|千ユーロ|万ユーロ|千万ユーロ|ユーロ)+(\(税込\)|\(税抜\)|\+tax)*' )
_snake_case : List[str] = '─━│┃┄┅┆┇┈┉┊┋┌┍┎┏┐┑┒┓└┕┖┗┘┙┚┛├┝┞┟┠┡┢┣┤┥┦┧┨┩┪┫┬┭┮┯┰┱┲┳┴┵┶┷┸┹┺┻┼┽┾┿╀╁╂╃╄╅╆╇╈╉╊╋╌╍╎╏═║╒╓╔╕╖╗╘╙╚╛╜╝╞╟╠╡╢╣╤╥╦╧╨╩╪╫╬╭╮╯╰╱╲╳╴╵╶╷╸╹╺╻╼╽╾╿'
_snake_case : Any = '▀▁▂▃▄▅▆▇█▉▊▋▌▍▎▏▐░▒▓▔▕▖▗▘▙▚▛▜▝▞▟'
_snake_case : Optional[int] = str.maketrans({k: '<BLOCK>' for k in keisen + blocks} )
def __len__( self : Optional[Any] ):
'''simple docstring'''
return len(self.ids_to_tokens )
def UpperCamelCase_ ( self : Tuple , UpperCamelCase : str ):
'''simple docstring'''
_snake_case : List[str] = self.content_repattera.sub('<URL>' , UpperCamelCase )
_snake_case : int = self.content_repattera.sub('<EMAIL>' , UpperCamelCase )
_snake_case : Optional[int] = self.content_repattera.sub('<TEL>' , UpperCamelCase )
_snake_case : Tuple = self.content_repattera.sub('<DATE>' , UpperCamelCase )
_snake_case : Union[str, Any] = self.content_repattera.sub('<DATE>' , UpperCamelCase )
_snake_case : Optional[int] = self.content_repattera.sub('<PRICE>' , UpperCamelCase )
_snake_case : str = content.translate(self.content_transa )
while "<BLOCK><BLOCK>" in content:
_snake_case : Tuple = content.replace('<BLOCK><BLOCK>' , '<BLOCK>' )
return content
def UpperCamelCase_ ( self : Dict , UpperCamelCase : Tuple , UpperCamelCase : Optional[int]=False ):
'''simple docstring'''
_snake_case : Optional[int] = text.replace(' ' , '<SP>' )
_snake_case : Tuple = text.replace(' ' , '<SP>' )
_snake_case : Optional[Any] = text.replace('\r\n' , '<BR>' )
_snake_case : int = text.replace('\n' , '<BR>' )
_snake_case : Union[str, Any] = text.replace('\r' , '<BR>' )
_snake_case : List[Any] = text.replace('\t' , '<TAB>' )
_snake_case : Union[str, Any] = text.replace('—' , 'ー' )
_snake_case : List[Any] = text.replace('−' , 'ー' )
for k, v in self.emoji["emoji"].items():
if k in text:
_snake_case : Optional[int] = text.replace(UpperCamelCase , UpperCamelCase )
if clean:
_snake_case : List[str] = self.clean_text(UpperCamelCase )
def check_simbol(UpperCamelCase : Dict ):
_snake_case : Any = x.encode()
if len(UpperCamelCase ) == 1 and len(UpperCamelCase ) == 2:
_snake_case : Any = (int(e[0] ) << 8) + int(e[1] )
if (
(c >= 0xc2a1 and c <= 0xc2bf)
or (c >= 0xc780 and c <= 0xc783)
or (c >= 0xcab9 and c <= 0xcbbf)
or (c >= 0xcc80 and c <= 0xcda2)
):
return True
return False
def checkuae(UpperCamelCase : Optional[Any] ):
_snake_case : Optional[int] = x.encode()
if len(UpperCamelCase ) == 1 and len(UpperCamelCase ) == 3:
_snake_case : List[Any] = (int(e[0] ) << 16) + (int(e[1] ) << 8) + int(e[2] )
if c >= 0xe28080 and c <= 0xe2b07f:
return True
return False
_snake_case : List[str] = 0
_snake_case : Dict = []
while pos < len(UpperCamelCase ):
_snake_case : List[str] = min(len(UpperCamelCase ) , pos + self.maxlen + 1 ) if text[pos] == '<' else pos + 3
_snake_case : Union[str, Any] = [] # (token_id, token, pos)
for e in range(UpperCamelCase , UpperCamelCase , -1 ):
_snake_case : Dict = text[pos:e]
if wd in self.vocab:
if wd[0] == "<" and len(UpperCamelCase ) > 2:
_snake_case : Dict = [(self.vocab[wd], wd, e)]
break
else:
candidates.append((self.vocab[wd], wd, e) )
if len(UpperCamelCase ) > 0:
# the smallest token_id is adopted
_snake_case , _snake_case , _snake_case : Any = sorted(UpperCamelCase , key=lambda UpperCamelCase : x[0] )[0]
result.append(UpperCamelCase )
_snake_case : Optional[Any] = e
else:
_snake_case : List[str] = pos + 1
_snake_case : List[Any] = text[pos:end]
if check_simbol(UpperCamelCase ):
result.append('<KIGOU>' )
elif checkuae(UpperCamelCase ):
result.append('<U2000U2BFF>' )
else:
for i in wd.encode('utf-8' ):
result.append('<|byte%d|>' % i )
_snake_case : Optional[int] = end
return result
def UpperCamelCase_ ( self : Optional[int] , UpperCamelCase : Optional[Any] , UpperCamelCase : int="\n" ):
'''simple docstring'''
_snake_case : str = []
_snake_case : str = []
_snake_case : List[str] = self.ids_to_tokens[index][0]
if word[:6] == "<|byte" and word[-2:] == "|>":
byte_tokens.append(int(word[6:-2] ) )
else:
if len(UpperCamelCase ) > 0:
words.append(bytearray(UpperCamelCase ).decode('utf-8' , errors='replace' ) )
_snake_case : int = []
if word[:7] == "<|emoji" and word[-2:] == "|>":
words.append(self.emoji['emoji_inv'][word] )
elif word == "<SP>":
words.append(' ' )
elif word == "<BR>":
words.append(UpperCamelCase )
elif word == "<TAB>":
words.append('\t' )
elif word == "<BLOCK>":
words.append('▀' )
elif word == "<KIGOU>":
words.append('ǀ' )
elif word == "<U2000U2BFF>":
words.append('‖' )
else:
words.append(UpperCamelCase )
if len(UpperCamelCase ) > 0:
words.append(bytearray(UpperCamelCase ).decode('utf-8' , errors='replace' ) )
_snake_case : List[Any] = ''.join(UpperCamelCase )
return text
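# Reading aid (added commentary, not upstream code): in the tokenize loop above,
# every vocabulary substring starting at the current position becomes a
# candidate; special '<...>' tokens win outright, otherwise the candidate with
# the smallest token id is adopted; characters with no match fall back to
# <KIGOU> (symbols), <U2000U2BFF> (the U+2000-U+2BFF block), or raw <|byte%d|>
# byte-level tokens.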
| 669 |
from dataclasses import dataclass
from typing import Dict, Optional, Union
import torch
import torch.nn.functional as F
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .attention import BasicTransformerBlock
from .attention_processor import AttentionProcessor, AttnProcessor
from .embeddings import TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin
@dataclass
class PriorTransformerOutput ( BaseOutput ):
'''simple docstring'''
a_ : torch.FloatTensor
class PriorTransformer ( ModelMixin , ConfigMixin ):
'''simple docstring'''
@register_to_config
def __init__( self : str , UpperCamelCase : int = 32 , UpperCamelCase : int = 64 , UpperCamelCase : int = 20 , UpperCamelCase : int = 7_68 , UpperCamelCase : Optional[int]=77 , UpperCamelCase : int=4 , UpperCamelCase : float = 0.0 , UpperCamelCase : str = "silu" , UpperCamelCase : Optional[str] = None , UpperCamelCase : Optional[str] = None , UpperCamelCase : Optional[str] = "linear" , UpperCamelCase : Optional[str] = "prd" , UpperCamelCase : Optional[int] = None , UpperCamelCase : Optional[int] = None , UpperCamelCase : Optional[int] = None , ):
'''simple docstring'''
super().__init__()
_snake_case : str = num_attention_heads
_snake_case : Optional[int] = attention_head_dim
_snake_case : Any = num_attention_heads * attention_head_dim
_snake_case : List[Any] = additional_embeddings
_snake_case : List[str] = time_embed_dim or inner_dim
_snake_case : int = embedding_proj_dim or embedding_dim
_snake_case : List[Any] = clip_embed_dim or embedding_dim
_snake_case : Optional[Any] = Timesteps(UpperCamelCase , UpperCamelCase , 0 )
_snake_case : List[Any] = TimestepEmbedding(UpperCamelCase , UpperCamelCase , out_dim=UpperCamelCase , act_fn=UpperCamelCase )
_snake_case : Optional[int] = nn.Linear(UpperCamelCase , UpperCamelCase )
if embedding_proj_norm_type is None:
_snake_case : str = None
elif embedding_proj_norm_type == "layer":
_snake_case : List[Any] = nn.LayerNorm(UpperCamelCase )
else:
raise ValueError(f"""unsupported embedding_proj_norm_type: {embedding_proj_norm_type}""" )
_snake_case : str = nn.Linear(UpperCamelCase , UpperCamelCase )
if encoder_hid_proj_type is None:
_snake_case : Any = None
elif encoder_hid_proj_type == "linear":
_snake_case : Optional[int] = nn.Linear(UpperCamelCase , UpperCamelCase )
else:
raise ValueError(f"""unsupported encoder_hid_proj_type: {encoder_hid_proj_type}""" )
_snake_case : List[str] = nn.Parameter(torch.zeros(1 , num_embeddings + additional_embeddings , UpperCamelCase ) )
if added_emb_type == "prd":
_snake_case : str = nn.Parameter(torch.zeros(1 , 1 , UpperCamelCase ) )
elif added_emb_type is None:
_snake_case : Dict = None
else:
raise ValueError(
f"""`added_emb_type`: {added_emb_type} is not supported. Make sure to choose one of `'prd'` or `None`.""" )
_snake_case : Optional[int] = nn.ModuleList(
[
BasicTransformerBlock(
UpperCamelCase , UpperCamelCase , UpperCamelCase , dropout=UpperCamelCase , activation_fn='gelu' , attention_bias=UpperCamelCase , )
for d in range(UpperCamelCase )
] )
if norm_in_type == "layer":
_snake_case : Optional[int] = nn.LayerNorm(UpperCamelCase )
elif norm_in_type is None:
_snake_case : Optional[Any] = None
else:
raise ValueError(f"""Unsupported norm_in_type: {norm_in_type}.""" )
_snake_case : Optional[Any] = nn.LayerNorm(UpperCamelCase )
_snake_case : Union[str, Any] = nn.Linear(UpperCamelCase , UpperCamelCase )
_snake_case : List[Any] = torch.full(
[num_embeddings + additional_embeddings, num_embeddings + additional_embeddings] , -1_00_00.0 )
causal_attention_mask.triu_(1 )
_snake_case : Optional[Any] = causal_attention_mask[None, ...]
self.register_buffer('causal_attention_mask' , UpperCamelCase , persistent=UpperCamelCase )
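        # Illustration (added note, not in the original file): for 3 positions the
        # buffer after `triu_(1)` keeps -10000.0 strictly above the diagonal,
        #   [[0, -1e4, -1e4],
        #    [0,    0, -1e4],
        #    [0,    0,    0]],
        # so adding it to attention scores blocks attention to future tokens.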
_snake_case : str = nn.Parameter(torch.zeros(1 , UpperCamelCase ) )
_snake_case : List[str] = nn.Parameter(torch.zeros(1 , UpperCamelCase ) )
@property
# Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors
def UpperCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
_snake_case : Optional[Any] = {}
def fn_recursive_add_processors(UpperCamelCase : str , UpperCamelCase : torch.nn.Module , UpperCamelCase : Dict[str, AttentionProcessor] ):
if hasattr(UpperCamelCase , 'set_processor' ):
_snake_case : Tuple = module.processor
for sub_name, child in module.named_children():
fn_recursive_add_processors(f"""{name}.{sub_name}""" , UpperCamelCase , UpperCamelCase )
return processors
for name, module in self.named_children():
fn_recursive_add_processors(UpperCamelCase , UpperCamelCase , UpperCamelCase )
return processors
def UpperCamelCase_ ( self : List[Any] , UpperCamelCase : Union[AttentionProcessor, Dict[str, AttentionProcessor]] ):
'''simple docstring'''
_snake_case : Optional[int] = len(self.attn_processors.keys() )
if isinstance(UpperCamelCase , UpperCamelCase ) and len(UpperCamelCase ) != count:
raise ValueError(
f"""A dict of processors was passed, but the number of processors {len(UpperCamelCase )} does not match the"""
f""" number of attention layers: {count}. Please make sure to pass {count} processor classes.""" )
def fn_recursive_attn_processor(UpperCamelCase : str , UpperCamelCase : torch.nn.Module , UpperCamelCase : Union[str, Any] ):
if hasattr(UpperCamelCase , 'set_processor' ):
if not isinstance(UpperCamelCase , UpperCamelCase ):
module.set_processor(UpperCamelCase )
else:
module.set_processor(processor.pop(f"""{name}.processor""" ) )
for sub_name, child in module.named_children():
fn_recursive_attn_processor(f"""{name}.{sub_name}""" , UpperCamelCase , UpperCamelCase )
for name, module in self.named_children():
fn_recursive_attn_processor(UpperCamelCase , UpperCamelCase , UpperCamelCase )
def UpperCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
self.set_attn_processor(AttnProcessor() )
def UpperCamelCase_ ( self : Optional[Any] , UpperCamelCase : Any , UpperCamelCase : Union[torch.Tensor, float, int] , UpperCamelCase : torch.FloatTensor , UpperCamelCase : Optional[torch.FloatTensor] = None , UpperCamelCase : Optional[torch.BoolTensor] = None , UpperCamelCase : bool = True , ):
'''simple docstring'''
_snake_case : Dict = hidden_states.shape[0]
_snake_case : str = timestep
if not torch.is_tensor(UpperCamelCase ):
_snake_case : Dict = torch.tensor([timesteps] , dtype=torch.long , device=hidden_states.device )
elif torch.is_tensor(UpperCamelCase ) and len(timesteps.shape ) == 0:
_snake_case : Tuple = timesteps[None].to(hidden_states.device )
# broadcast to batch dimension in a way that's compatible with ONNX/Core ML
_snake_case : Optional[int] = timesteps * torch.ones(UpperCamelCase , dtype=timesteps.dtype , device=timesteps.device )
_snake_case : Union[str, Any] = self.time_proj(UpperCamelCase )
# timesteps does not contain any weights and will always return f32 tensors
# but time_embedding might be fp16, so we need to cast here.
_snake_case : Tuple = timesteps_projected.to(dtype=self.dtype )
_snake_case : List[Any] = self.time_embedding(UpperCamelCase )
if self.embedding_proj_norm is not None:
_snake_case : Optional[Any] = self.embedding_proj_norm(UpperCamelCase )
_snake_case : Union[str, Any] = self.embedding_proj(UpperCamelCase )
if self.encoder_hidden_states_proj is not None and encoder_hidden_states is not None:
_snake_case : Dict = self.encoder_hidden_states_proj(UpperCamelCase )
elif self.encoder_hidden_states_proj is not None and encoder_hidden_states is None:
raise ValueError('`encoder_hidden_states_proj` requires `encoder_hidden_states` to be set' )
_snake_case : str = self.proj_in(UpperCamelCase )
_snake_case : int = self.positional_embedding.to(hidden_states.dtype )
_snake_case : Optional[int] = []
_snake_case : List[Any] = 0
if encoder_hidden_states is not None:
additional_embeds.append(UpperCamelCase )
additional_embeddings_len += encoder_hidden_states.shape[1]
if len(proj_embeddings.shape ) == 2:
_snake_case : str = proj_embeddings[:, None, :]
if len(hidden_states.shape ) == 2:
_snake_case : str = hidden_states[:, None, :]
_snake_case : str = additional_embeds + [
proj_embeddings,
time_embeddings[:, None, :],
hidden_states,
]
if self.prd_embedding is not None:
_snake_case : int = self.prd_embedding.to(hidden_states.dtype ).expand(UpperCamelCase , -1 , -1 )
additional_embeds.append(UpperCamelCase )
_snake_case : Optional[int] = torch.cat(
UpperCamelCase , dim=1 , )
        # Allow positional_embedding to not include the `additional_embeddings` and instead pad it with zeros for these additional tokens
_snake_case : Tuple = additional_embeddings_len + proj_embeddings.shape[1] + 1
if positional_embeddings.shape[1] < hidden_states.shape[1]:
_snake_case : Optional[Any] = F.pad(
UpperCamelCase , (
0,
0,
additional_embeddings_len,
self.prd_embedding.shape[1] if self.prd_embedding is not None else 0,
) , value=0.0 , )
_snake_case : Optional[Any] = hidden_states + positional_embeddings
if attention_mask is not None:
_snake_case : Any = (1 - attention_mask.to(hidden_states.dtype )) * -1_00_00.0
_snake_case : Tuple = F.pad(UpperCamelCase , (0, self.additional_embeddings) , value=0.0 )
_snake_case : int = (attention_mask[:, None, :] + self.causal_attention_mask).to(hidden_states.dtype )
_snake_case : str = attention_mask.repeat_interleave(self.config.num_attention_heads , dim=0 )
if self.norm_in is not None:
_snake_case : Tuple = self.norm_in(UpperCamelCase )
for block in self.transformer_blocks:
_snake_case : Any = block(UpperCamelCase , attention_mask=UpperCamelCase )
_snake_case : Dict = self.norm_out(UpperCamelCase )
if self.prd_embedding is not None:
_snake_case : str = hidden_states[:, -1]
else:
_snake_case : Any = hidden_states[:, additional_embeddings_len:]
_snake_case : List[Any] = self.proj_to_clip_embeddings(UpperCamelCase )
if not return_dict:
return (predicted_image_embedding,)
return PriorTransformerOutput(predicted_image_embedding=UpperCamelCase )
def UpperCamelCase_ ( self : Tuple , UpperCamelCase : Union[str, Any] ):
'''simple docstring'''
_snake_case : List[Any] = (prior_latents * self.clip_std) + self.clip_mean
return prior_latents
| 669 | 1 |
from __future__ import annotations
import math
def is_prime(number: int) -> bool:
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All remaining primes are of the form 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def list_truncated_nums(n: int) -> list[int]:
    str_num = str(n)
    list_nums = [n]
    for i in range(1, len(str_num)):
        list_nums.append(int(str_num[i:]))
        list_nums.append(int(str_num[:-i]))
    return list_nums


def validate(n: int) -> bool:
    if len(str(n)) > 3:
        if not is_prime(int(str(n)[-3:])) or not is_prime(int(str(n)[:3])):
            return False
    return True


def compute_truncated_primes(count: int = 11) -> list[int]:
    list_truncated_primes: list[int] = []
    num = 13
    while len(list_truncated_primes) != count:
        if validate(num):
            list_nums = list_truncated_nums(num)
            if all(is_prime(i) for i in list_nums):
                list_truncated_primes.append(num)
        num += 2
    return list_truncated_primes


def solution() -> int:
    return sum(compute_truncated_primes(11))
if __name__ == "__main__":
print(F"""{sum(compute_truncated_primes(11)) = }""")
| 669 |
def catalan(number: int) -> int:
    if not isinstance(number, int):
        msg = F"""Input value of [number={number}] must be an integer"""
        raise TypeError(msg)
    if number < 1:
        msg = F"""Input value of [number={number}] must be > 0"""
        raise ValueError(msg)
    current_number = 1
    for i in range(1, number):
        current_number *= 4 * i - 2
        current_number //= i + 1
    return current_number
if __name__ == "__main__":
import doctest
doctest.testmod()
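# Sanity check (the function is 1-indexed over the Catalan sequence
# 1, 1, 2, 5, 14, ...):
# [catalan(i) for i in range(1, 6)]  # -> [1, 1, 2, 5, 14]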
| 669 | 1 |
from __future__ import annotations
graph = {
"""A""": ["""B""", """C""", """E"""],
"""B""": ["""A""", """D""", """E"""],
"""C""": ["""A""", """F""", """G"""],
"""D""": ["""B"""],
"""E""": ["""A""", """B""", """D"""],
"""F""": ["""C"""],
"""G""": ["""C"""],
}
class Graph:
    '''simple docstring'''

    def __init__(self, graph: dict[str, list[str]], source_vertex: str):
        '''simple docstring'''
        self.graph = graph
        # mapping node to its parent in resulting breadth first tree
        self.parent: dict[str, str | None] = {}
        self.source_vertex = source_vertex

    def breath_first_search(self):
        '''simple docstring'''
        visited = {self.source_vertex}
        self.parent[self.source_vertex] = None
        queue = [self.source_vertex]  # first in first out queue
        while queue:
            vertex = queue.pop(0)
            for adjacent_vertex in self.graph[vertex]:
                if adjacent_vertex not in visited:
                    visited.add(adjacent_vertex)
                    self.parent[adjacent_vertex] = vertex
                    queue.append(adjacent_vertex)

    def shortest_path(self, target_vertex: str) -> str:
        '''simple docstring'''
        if target_vertex == self.source_vertex:
            return self.source_vertex
        target_vertex_parent = self.parent.get(target_vertex)
        if target_vertex_parent is None:
            msg = (
                f"""No path from vertex: {self.source_vertex} to vertex: {target_vertex}"""
            )
            raise ValueError(msg)
        return self.shortest_path(target_vertex_parent) + f"""->{target_vertex}"""
if __name__ == "__main__":
lowerCAmelCase_ = Graph(graph, """G""")
g.breath_first_search()
print(g.shortest_path("""D"""))
print(g.shortest_path("""G"""))
print(g.shortest_path("""Foo"""))
| 669 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_lxmert import LxmertTokenizer
lowerCAmelCase_ = {"""vocab_file""": """vocab.txt""", """tokenizer_file""": """tokenizer.json"""}
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""unc-nlp/lxmert-base-uncased""": """https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/vocab.txt""",
},
"""tokenizer_file""": {
"""unc-nlp/lxmert-base-uncased""": (
"""https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/tokenizer.json"""
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""unc-nlp/lxmert-base-uncased""": 512,
}
PRETRAINED_INIT_CONFIGURATION = {
"""unc-nlp/lxmert-base-uncased""": {"""do_lower_case""": True},
}
class LxmertTokenizerFast(PreTrainedTokenizerFast):
    '''simple docstring'''

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = LxmertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        '''simple docstring'''
        super().__init__(
            vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, tokenize_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents, **kwargs, )
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get('lowercase', do_lower_case) != do_lower_case
            or normalizer_state.get('strip_accents', strip_accents) != strip_accents
            or normalizer_state.get('handle_chinese_chars', tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop('type'))
            normalizer_state['lowercase'] = do_lower_case
            normalizer_state['strip_accents'] = strip_accents
            normalizer_state['handle_chinese_chars'] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)
        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        '''simple docstring'''
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        '''simple docstring'''
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None):
        '''simple docstring'''
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
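# Usage sketch (network access assumed; checkpoint id from the map above):
# tokenizer = LxmertTokenizerFast.from_pretrained("unc-nlp/lxmert-base-uncased")
# tokenizer("A picture of a cat.")["input_ids"]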
| 669 | 1 |
import gc
import unittest
from parameterized import parameterized
from diffusers import FlaxUNetaDConditionModel
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import load_hf_numpy, require_flax, slow
if is_flax_available():
import jax
import jax.numpy as jnp
@slow
@require_flax
class _lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
    def get_file_format(self, seed, shape):
        '''simple docstring'''
        return f"""gaussian_noise_s={seed}_shape={'_'.join([str(s) for s in shape])}.npy"""
def UpperCamelCase_ ( self : Dict ):
'''simple docstring'''
super().tearDown()
gc.collect()
def UpperCamelCase_ ( self : Tuple , UpperCamelCase : Optional[int]=0 , UpperCamelCase : Optional[int]=(4, 4, 64, 64) , UpperCamelCase : List[str]=False ):
'''simple docstring'''
_snake_case : Optional[Any] = jnp.bfloataa if fpaa else jnp.floataa
_snake_case : Tuple = jnp.array(load_hf_numpy(self.get_file_format(UpperCamelCase , UpperCamelCase ) ) , dtype=UpperCamelCase )
return image
def UpperCamelCase_ ( self : Optional[int] , UpperCamelCase : Tuple=False , UpperCamelCase : Union[str, Any]="CompVis/stable-diffusion-v1-4" ):
'''simple docstring'''
_snake_case : Tuple = jnp.bfloataa if fpaa else jnp.floataa
_snake_case : Optional[Any] = 'bf16' if fpaa else None
_snake_case , _snake_case : Tuple = FlaxUNetaDConditionModel.from_pretrained(
UpperCamelCase , subfolder='unet' , dtype=UpperCamelCase , revision=UpperCamelCase )
return model, params
def UpperCamelCase_ ( self : Optional[int] , UpperCamelCase : int=0 , UpperCamelCase : List[str]=(4, 77, 7_68) , UpperCamelCase : Union[str, Any]=False ):
'''simple docstring'''
_snake_case : Dict = jnp.bfloataa if fpaa else jnp.floataa
_snake_case : Dict = jnp.array(load_hf_numpy(self.get_file_format(UpperCamelCase , UpperCamelCase ) ) , dtype=UpperCamelCase )
return hidden_states
@parameterized.expand(
[
# fmt: off
[83, 4, [-0.23_23, -0.13_04, 0.08_13, -0.30_93, -0.09_19, -0.15_71, -0.11_25, -0.58_06]],
[17, 0.55, [-0.08_31, -0.24_43, 0.09_01, -0.09_19, 0.33_96, 0.01_03, -0.37_43, 0.07_01]],
[8, 0.89, [-0.48_63, 0.08_59, 0.08_75, -0.16_58, 0.91_99, -0.01_14, 0.48_39, 0.46_39]],
[3, 10_00, [-0.56_49, 0.24_02, -0.55_18, 0.12_48, 1.13_28, -0.24_43, -0.03_25, -1.00_78]],
# fmt: on
] )
def UpperCamelCase_ ( self : List[str] , UpperCamelCase : Any , UpperCamelCase : Any , UpperCamelCase : List[Any] ):
'''simple docstring'''
_snake_case , _snake_case : Tuple = self.get_unet_model(model_id='CompVis/stable-diffusion-v1-4' , fpaa=UpperCamelCase )
_snake_case : List[str] = self.get_latents(UpperCamelCase , fpaa=UpperCamelCase )
_snake_case : Optional[int] = self.get_encoder_hidden_states(UpperCamelCase , fpaa=UpperCamelCase )
_snake_case : Optional[int] = model.apply(
{'params': params} , UpperCamelCase , jnp.array(UpperCamelCase , dtype=jnp.intaa ) , encoder_hidden_states=UpperCamelCase , ).sample
assert sample.shape == latents.shape
_snake_case : Optional[int] = jnp.asarray(jax.device_get((sample[-1, -2:, -2:, :2].flatten()) ) , dtype=jnp.floataa )
_snake_case : Optional[int] = jnp.array(UpperCamelCase , dtype=jnp.floataa )
        # Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, on the same hardware
assert jnp.allclose(UpperCamelCase , UpperCamelCase , atol=1e-2 )
@parameterized.expand(
[
# fmt: off
[83, 4, [0.15_14, 0.08_07, 0.16_24, 0.10_16, -0.18_96, 0.02_63, 0.06_77, 0.23_10]],
[17, 0.55, [0.11_64, -0.02_16, 0.01_70, 0.15_89, -0.31_20, 0.10_05, -0.05_81, -0.14_58]],
[8, 0.89, [-0.17_58, -0.01_69, 0.10_04, -0.14_11, 0.13_12, 0.11_03, -0.19_96, 0.21_39]],
[3, 10_00, [0.12_14, 0.03_52, -0.07_31, -0.15_62, -0.09_94, -0.09_06, -0.23_40, -0.05_39]],
# fmt: on
] )
def UpperCamelCase_ ( self : Dict , UpperCamelCase : int , UpperCamelCase : Dict , UpperCamelCase : Optional[int] ):
'''simple docstring'''
_snake_case , _snake_case : int = self.get_unet_model(model_id='stabilityai/stable-diffusion-2' , fpaa=UpperCamelCase )
_snake_case : str = self.get_latents(UpperCamelCase , shape=(4, 4, 96, 96) , fpaa=UpperCamelCase )
_snake_case : Dict = self.get_encoder_hidden_states(UpperCamelCase , shape=(4, 77, 10_24) , fpaa=UpperCamelCase )
_snake_case : List[str] = model.apply(
{'params': params} , UpperCamelCase , jnp.array(UpperCamelCase , dtype=jnp.intaa ) , encoder_hidden_states=UpperCamelCase , ).sample
assert sample.shape == latents.shape
_snake_case : Tuple = jnp.asarray(jax.device_get((sample[-1, -2:, -2:, :2].flatten()) ) , dtype=jnp.floataa )
_snake_case : Tuple = jnp.array(UpperCamelCase , dtype=jnp.floataa )
# Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, on the same hardware
assert jnp.allclose(UpperCamelCase , UpperCamelCase , atol=1e-2 )
| 669 |
from __future__ import annotations
from random import random
class Node:
    '''simple docstring'''

    def __init__(self, value: int | None = None):
        '''simple docstring'''
        self.value = value
        self.prior = random()
        self.left: Node | None = None
        self.right: Node | None = None

    def __repr__(self):
        '''simple docstring'''
        from pprint import pformat

        if self.left is None and self.right is None:
            return f"""'{self.value}: {self.prior:.5}'"""
        else:
            return pformat(
                {f"""{self.value}: {self.prior:.5}""": (self.left, self.right)}, indent=1 )

    def __str__(self):
        '''simple docstring'''
        value = str(self.value) + ' '
        left = str(self.left or '')
        right = str(self.right or '')
        return value + left + right


def split(root: Node | None, value: int) -> tuple[Node | None, Node | None]:
    if root is None:  # None tree is split into 2 Nones
        return None, None
    elif root.value is None:
        return None, None
    else:
        if value < root.value:
            left, root.left = split(root.left, value)
            return left, root
        else:
            root.right, right = split(root.right, value)
            return root, right


def merge(left: Node | None, right: Node | None) -> Node | None:
    if (not left) or (not right):  # If one node is None, return the other
        return left or right
    elif left.prior < right.prior:
        left.right = merge(left.right, right)
        return left
    else:
        right.left = merge(left, right.left)
        return right


def insert(root: Node | None, value: int) -> Node | None:
    node = Node(value)
    left, right = split(root, value)
    return merge(merge(left, node), right)


def erase(root: Node | None, value: int) -> Node | None:
    left, right = split(root, value - 1)
    _, right = split(right, value)
    return merge(left, right)


def inorder(root: Node | None) -> None:
    if not root:  # None
        return
    else:
        inorder(root.left)
        print(root.value, end=',')
        inorder(root.right)


def interact_treap(root: Node | None, args: str) -> Node | None:
    for arg in args.split():
        if arg[0] == "+":
            root = insert(root, int(arg[1:]))
        elif arg[0] == "-":
            root = erase(root, int(arg[1:]))
        else:
            print('Unknown command')
    return root


def main() -> None:
    root = None
    print(
        'enter numbers to create a tree, + value to add value into treap, '
        '- value to erase all nodes with value. \'q\' to quit. ' )
    args = input()
    while args != "q":
        root = interact_treap(root, args)
        print(root)
        args = input()
    print('good bye!')
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
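# Non-interactive sketch of the commands handled above (assumed input string):
# root = interact_treap(None, "+1 +3 +5 +17 +19 +2 +16 +4 +0")
# inorder(root)  # prints: 0,1,2,3,4,5,16,17,19,
# root = interact_treap(root, "-1")
# inorder(root)  # prints: 0,2,3,4,5,16,17,19,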
| 669 | 1 |
import unittest
import numpy as np
import torch
from .utils_summarization import build_mask, compute_token_type_ids, process_story, truncate_or_pad
class _lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def UpperCamelCase_ ( self : Dict ):
'''simple docstring'''
_snake_case : Any = 10
def UpperCamelCase_ ( self : Dict ):
'''simple docstring'''
_snake_case : List[Any] = [1, 2, 3, 4]
_snake_case : Optional[int] = [1, 2, 3, 4, 0, 0, 0, 0, 0, 0]
self.assertEqual(truncate_or_pad(UpperCamelCase , self.block_size , 0 ) , UpperCamelCase )
def UpperCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
_snake_case : Optional[Any] = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
_snake_case : int = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
self.assertEqual(truncate_or_pad(UpperCamelCase , self.block_size , 0 ) , UpperCamelCase )
def UpperCamelCase_ ( self : Tuple ):
'''simple docstring'''
_snake_case : Optional[Any] = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13]
_snake_case : List[Any] = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
self.assertEqual(truncate_or_pad(UpperCamelCase , self.block_size , 0 ) , UpperCamelCase )
def UpperCamelCase_ ( self : Any ):
'''simple docstring'''
_snake_case : Tuple = 'It was the year of Our Lord one thousand seven hundred and\n seventy-five.\n\nSpiritual revelations were conceded to England at that\n favoured period, as at this.'
_snake_case , _snake_case : List[str] = process_story(UpperCamelCase )
self.assertEqual(UpperCamelCase , [] )
def UpperCamelCase_ ( self : Dict ):
'''simple docstring'''
_snake_case : Optional[int] = ''
_snake_case , _snake_case : Union[str, Any] = process_story(UpperCamelCase )
self.assertEqual(UpperCamelCase , [] )
self.assertEqual(UpperCamelCase , [] )
def UpperCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
_snake_case : Any = (
'It was the year of Our Lord one thousand seven hundred and '
'seventy-five\n\nSpiritual revelations were conceded to England '
'at that favoured period, as at this.\n@highlight\n\nIt was the best of times'
)
_snake_case , _snake_case : Union[str, Any] = process_story(UpperCamelCase )
_snake_case : Union[str, Any] = [
'It was the year of Our Lord one thousand seven hundred and seventy-five.',
'Spiritual revelations were conceded to England at that favoured period, as at this.',
]
self.assertEqual(UpperCamelCase , UpperCamelCase )
_snake_case : int = ['It was the best of times.']
self.assertEqual(UpperCamelCase , UpperCamelCase )
def UpperCamelCase_ ( self : int ):
'''simple docstring'''
_snake_case : Tuple = torch.tensor([1, 2, 3, 4] )
_snake_case : Union[str, Any] = torch.tensor([1, 1, 1, 1] )
np.testing.assert_array_equal(build_mask(UpperCamelCase , 0 ).numpy() , expected.numpy() )
def UpperCamelCase_ ( self : int ):
'''simple docstring'''
_snake_case : Optional[Any] = torch.tensor([1, 2, 3, 4, 23, 23, 23] )
_snake_case : List[Any] = torch.tensor([1, 1, 1, 1, 0, 0, 0] )
np.testing.assert_array_equal(build_mask(UpperCamelCase , 23 ).numpy() , expected.numpy() )
def UpperCamelCase_ ( self : Any ):
'''simple docstring'''
_snake_case : Optional[Any] = torch.tensor([8, 2, 3, 4, 1, 1, 1] )
_snake_case : str = torch.tensor([1, 1, 1, 1, 0, 0, 0] )
np.testing.assert_array_equal(build_mask(UpperCamelCase , 1 ).numpy() , expected.numpy() )
def UpperCamelCase_ ( self : Any ):
'''simple docstring'''
_snake_case : Dict = 1_01
_snake_case : Optional[int] = torch.tensor([[1, 2, 3, 4, 5, 6], [1, 2, 3, 1_01, 5, 6], [1, 1_01, 3, 4, 1_01, 6]] )
_snake_case : str = torch.tensor([[1, 1, 1, 1, 1, 1], [1, 1, 1, 0, 0, 0], [1, 0, 0, 0, 1, 1]] )
_snake_case : Dict = compute_token_type_ids(UpperCamelCase , UpperCamelCase )
np.testing.assert_array_equal(UpperCamelCase , UpperCamelCase )
| 669 |
from functools import reduce
N = (
"""73167176531330624919225119674426574742355349194934"""
"""96983520312774506326239578318016984801869478851843"""
"""85861560789112949495459501737958331952853208805511"""
"""12540698747158523863050715693290963295227443043557"""
"""66896648950445244523161731856403098711121722383113"""
"""62229893423380308135336276614282806444486645238749"""
"""30358907296290491560440772390713810515859307960866"""
"""70172427121883998797908792274921901699720888093776"""
"""65727333001053367881220235421809751254540594752243"""
"""52584907711670556013604839586446706324415722155397"""
"""53697817977846174064955149290862569321978468622482"""
"""83972241375657056057490261407972968652414535100474"""
"""82166370484403199890008895243450658541227588666881"""
"""16427171479924442928230863465674813919123162824586"""
"""17866458359124566529476545682848912883142607690042"""
"""24219022671055626321111109370544217506941658960408"""
"""07198403850962455444362981230987879927244284909188"""
"""84580156166097919133875499200524063689912560717606"""
"""05886116467109405077541002256983155200055935729725"""
"""71636269561882670428252483600823257530420752963450"""
)
def solution(n: str = N) -> int:
    return max(
        # mypy cannot properly interpret reduce
        int(reduce(lambda x, y: str(int(x) * int(y)), n[i : i + 13]))
        for i in range(len(n) - 12) )
if __name__ == "__main__":
print(F"""{solution() = }""")
| 669 | 1 |
import random
import unittest
from torch.utils.data import BatchSampler, DataLoader, IterableDataset
from accelerate import Accelerator
from accelerate.data_loader import (
BatchSamplerShard,
DataLoaderDispatcher,
DataLoaderShard,
IterableDatasetShard,
SkipBatchSampler,
SkipDataLoader,
skip_first_batches,
)
class RandomIterableDataset ( IterableDataset ):
'''simple docstring'''
def __init__( self : List[Any] , UpperCamelCase : Union[str, Any]=0.01 , UpperCamelCase : Optional[Any]=10_00 ):
'''simple docstring'''
_snake_case : Tuple = p_stop
_snake_case : Dict = max_length
def __iter__( self : Any ):
'''simple docstring'''
_snake_case : Tuple = 0
_snake_case : Optional[Any] = False
while not stop and count < self.max_length:
yield count
count += 1
_snake_case : str = random.random() < self.p_stop
class _lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def UpperCamelCase_ ( self : Optional[Any] , UpperCamelCase : Dict , UpperCamelCase : Dict , UpperCamelCase : Optional[Any]=False , UpperCamelCase : List[str]=True ):
'''simple docstring'''
_snake_case : Optional[Any] = [
BatchSamplerShard(UpperCamelCase , 2 , UpperCamelCase , split_batches=UpperCamelCase , even_batches=UpperCamelCase )
for i in range(2 )
]
_snake_case : Any = [list(UpperCamelCase ) for batch_sampler_shard in batch_sampler_shards]
if not split_batches:
self.assertListEqual([len(UpperCamelCase ) for shard in batch_sampler_shards] , [len(UpperCamelCase ) for e in expected] )
self.assertListEqual(UpperCamelCase , UpperCamelCase )
def UpperCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
_snake_case : Optional[Any] = BatchSampler(range(24 ) , batch_size=3 , drop_last=UpperCamelCase )
_snake_case : List[str] = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 22, 23]],
]
self.check_batch_sampler_shards(UpperCamelCase , UpperCamelCase )
_snake_case : int = BatchSampler(range(24 ) , batch_size=3 , drop_last=UpperCamelCase )
# Expected shouldn't change
self.check_batch_sampler_shards(UpperCamelCase , UpperCamelCase )
# Check the shards when the dataset is a round multiple of batch size but not total batch size.
_snake_case : List[str] = BatchSampler(range(21 ) , batch_size=3 , drop_last=UpperCamelCase )
_snake_case : int = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [0, 1, 2]],
]
self.check_batch_sampler_shards(UpperCamelCase , UpperCamelCase )
_snake_case : Any = BatchSampler(range(21 ) , batch_size=3 , drop_last=UpperCamelCase )
_snake_case : List[str] = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(UpperCamelCase , UpperCamelCase )
# Check the shards when the dataset is not a round multiple of batch size but has a multiple of
# num_processes batch.
_snake_case : str = BatchSampler(range(22 ) , batch_size=3 , drop_last=UpperCamelCase )
_snake_case : Dict = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 0, 1]],
]
self.check_batch_sampler_shards(UpperCamelCase , UpperCamelCase )
_snake_case : str = BatchSampler(range(22 ) , batch_size=3 , drop_last=UpperCamelCase )
_snake_case : Union[str, Any] = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(UpperCamelCase , UpperCamelCase )
        # Check the shards when the dataset is not a round multiple of batch size and does not have a multiple of
        # num_processes batch.
_snake_case : Union[str, Any] = BatchSampler(range(20 ) , batch_size=3 , drop_last=UpperCamelCase )
_snake_case : Any = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 0]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [1, 2, 3]],
]
self.check_batch_sampler_shards(UpperCamelCase , UpperCamelCase )
_snake_case : str = BatchSampler(range(20 ) , batch_size=3 , drop_last=UpperCamelCase )
_snake_case : str = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(UpperCamelCase , UpperCamelCase )
# Check the shards when the dataset is very small.
_snake_case : Optional[Any] = BatchSampler(range(2 ) , batch_size=3 , drop_last=UpperCamelCase )
_snake_case : Any = [[[0, 1, 0]], [[1, 0, 1]]]
self.check_batch_sampler_shards(UpperCamelCase , UpperCamelCase )
_snake_case : Dict = BatchSampler(range(2 ) , batch_size=3 , drop_last=UpperCamelCase )
_snake_case : Optional[int] = [[], []]
self.check_batch_sampler_shards(UpperCamelCase , UpperCamelCase )
def UpperCamelCase_ ( self : Tuple ):
'''simple docstring'''
_snake_case : Optional[Any] = BatchSampler(range(24 ) , batch_size=4 , drop_last=UpperCamelCase )
_snake_case : Optional[Any] = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [22, 23]],
]
self.check_batch_sampler_shards(UpperCamelCase , UpperCamelCase , split_batches=UpperCamelCase )
_snake_case : Optional[int] = BatchSampler(range(24 ) , batch_size=4 , drop_last=UpperCamelCase )
# Expected shouldn't change
self.check_batch_sampler_shards(UpperCamelCase , UpperCamelCase , split_batches=UpperCamelCase )
# Check the shards when the dataset is not a round multiple of batch size.
_snake_case : int = BatchSampler(range(22 ) , batch_size=4 , drop_last=UpperCamelCase )
_snake_case : int = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [0, 1]],
]
self.check_batch_sampler_shards(UpperCamelCase , UpperCamelCase , split_batches=UpperCamelCase )
_snake_case : Optional[int] = BatchSampler(range(22 ) , batch_size=4 , drop_last=UpperCamelCase )
_snake_case : Any = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(UpperCamelCase , UpperCamelCase , split_batches=UpperCamelCase )
# Check the shards when the dataset is not a round multiple of batch size or num_processes.
_snake_case : Dict = BatchSampler(range(21 ) , batch_size=4 , drop_last=UpperCamelCase )
_snake_case : Any = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 0]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [1, 2]],
]
self.check_batch_sampler_shards(UpperCamelCase , UpperCamelCase , split_batches=UpperCamelCase )
_snake_case : Dict = BatchSampler(range(21 ) , batch_size=4 , drop_last=UpperCamelCase )
_snake_case : List[Any] = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(UpperCamelCase , UpperCamelCase , split_batches=UpperCamelCase )
# Check the shards when the dataset is very small.
_snake_case : List[Any] = BatchSampler(range(2 ) , batch_size=4 , drop_last=UpperCamelCase )
_snake_case : Optional[int] = [[[0, 1]], [[0, 1]]]
self.check_batch_sampler_shards(UpperCamelCase , UpperCamelCase , split_batches=UpperCamelCase )
_snake_case : Dict = BatchSampler(range(2 ) , batch_size=4 , drop_last=UpperCamelCase )
_snake_case : Dict = [[], []]
self.check_batch_sampler_shards(UpperCamelCase , UpperCamelCase , split_batches=UpperCamelCase )
def UpperCamelCase_ ( self : Tuple ):
'''simple docstring'''
_snake_case : Optional[Any] = BatchSampler(range(24 ) , batch_size=3 , drop_last=UpperCamelCase )
_snake_case : Dict = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 22, 23]],
]
self.check_batch_sampler_shards(UpperCamelCase , UpperCamelCase , even_batches=UpperCamelCase )
_snake_case : List[Any] = BatchSampler(range(24 ) , batch_size=3 , drop_last=UpperCamelCase )
# The expected shards shouldn't change
self.check_batch_sampler_shards(UpperCamelCase , UpperCamelCase , even_batches=UpperCamelCase )
# Check the shards when the dataset is a round multiple of batch size but not total batch size.
_snake_case : int = BatchSampler(range(21 ) , batch_size=3 , drop_last=UpperCamelCase )
_snake_case : Dict = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(UpperCamelCase , UpperCamelCase , even_batches=UpperCamelCase )
_snake_case : Optional[Any] = BatchSampler(range(21 ) , batch_size=3 , drop_last=UpperCamelCase )
_snake_case : Dict = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(UpperCamelCase , UpperCamelCase , even_batches=UpperCamelCase )
# Check the shards when the dataset is not a round multiple of the batch size but its number of
# batches is a multiple of num_processes.
_snake_case : Dict = BatchSampler(range(22 ) , batch_size=3 , drop_last=UpperCamelCase )
_snake_case : Optional[int] = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [21]],
]
self.check_batch_sampler_shards(UpperCamelCase , UpperCamelCase , even_batches=UpperCamelCase )
_snake_case : Any = BatchSampler(range(22 ) , batch_size=3 , drop_last=UpperCamelCase )
_snake_case : int = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(UpperCamelCase , UpperCamelCase , even_batches=UpperCamelCase )
# Check the shards when the dataset is not a round multiple of the batch size and its number of
# batches is not a multiple of num_processes.
_snake_case : Union[str, Any] = BatchSampler(range(20 ) , batch_size=3 , drop_last=UpperCamelCase )
_snake_case : Tuple = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(UpperCamelCase , UpperCamelCase , even_batches=UpperCamelCase )
_snake_case : List[str] = BatchSampler(range(20 ) , batch_size=3 , drop_last=UpperCamelCase )
_snake_case : Tuple = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(UpperCamelCase , UpperCamelCase , even_batches=UpperCamelCase )
# Check the shards when the dataset is very small.
_snake_case : Optional[Any] = BatchSampler(range(2 ) , batch_size=3 , drop_last=UpperCamelCase )
_snake_case : Optional[Any] = [[[0, 1]], []]
self.check_batch_sampler_shards(UpperCamelCase , UpperCamelCase , even_batches=UpperCamelCase )
_snake_case : Any = BatchSampler(range(2 ) , batch_size=3 , drop_last=UpperCamelCase )
_snake_case : int = [[], []]
self.check_batch_sampler_shards(UpperCamelCase , UpperCamelCase , even_batches=UpperCamelCase )
def UpperCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
_snake_case : Dict = BatchSampler(range(24 ) , batch_size=4 , drop_last=UpperCamelCase )
_snake_case : int = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [22, 23]],
]
self.check_batch_sampler_shards(UpperCamelCase , UpperCamelCase , split_batches=UpperCamelCase , even_batches=UpperCamelCase )
_snake_case : int = BatchSampler(range(24 ) , batch_size=4 , drop_last=UpperCamelCase )
# The expected shards shouldn't change
self.check_batch_sampler_shards(UpperCamelCase , UpperCamelCase , split_batches=UpperCamelCase , even_batches=UpperCamelCase )
# Check the shards when the dataset is not a round multiple of batch size.
_snake_case : Optional[int] = BatchSampler(range(22 ) , batch_size=4 , drop_last=UpperCamelCase )
_snake_case : List[str] = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(UpperCamelCase , UpperCamelCase , split_batches=UpperCamelCase , even_batches=UpperCamelCase )
_snake_case : Union[str, Any] = BatchSampler(range(22 ) , batch_size=4 , drop_last=UpperCamelCase )
_snake_case : int = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(UpperCamelCase , UpperCamelCase , split_batches=UpperCamelCase , even_batches=UpperCamelCase )
# Check the shards when the dataset is not a round multiple of batch size or num_processes.
_snake_case : str = BatchSampler(range(21 ) , batch_size=4 , drop_last=UpperCamelCase )
_snake_case : List[Any] = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(UpperCamelCase , UpperCamelCase , split_batches=UpperCamelCase , even_batches=UpperCamelCase )
_snake_case : List[str] = BatchSampler(range(21 ) , batch_size=4 , drop_last=UpperCamelCase )
_snake_case : Dict = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(UpperCamelCase , UpperCamelCase , split_batches=UpperCamelCase , even_batches=UpperCamelCase )
# Check the shards when the dataset is very small.
_snake_case : Union[str, Any] = BatchSampler(range(2 ) , batch_size=4 , drop_last=UpperCamelCase )
_snake_case : int = [[[0, 1]], []]
self.check_batch_sampler_shards(UpperCamelCase , UpperCamelCase , split_batches=UpperCamelCase , even_batches=UpperCamelCase )
_snake_case : Any = BatchSampler(range(2 ) , batch_size=4 , drop_last=UpperCamelCase )
_snake_case : Union[str, Any] = [[], []]
self.check_batch_sampler_shards(UpperCamelCase , UpperCamelCase , split_batches=UpperCamelCase , even_batches=UpperCamelCase )
def UpperCamelCase_ ( self : List[str] ):
'''simple docstring'''
_snake_case : Union[str, Any] = [[0, 1, 2], [3, 4], [5, 6, 7, 8], [9, 10, 11], [12, 13]]
_snake_case : int = [BatchSamplerShard(UpperCamelCase , 2 , UpperCamelCase , even_batches=UpperCamelCase ) for i in range(2 )]
self.assertEqual(len(batch_sampler_shards[0] ) , 3 )
self.assertEqual(len(batch_sampler_shards[1] ) , 2 )
self.assertListEqual(list(batch_sampler_shards[0] ) , [[0, 1, 2], [5, 6, 7, 8], [12, 13]] )
self.assertListEqual(list(batch_sampler_shards[1] ) , [[3, 4], [9, 10, 11]] )
def UpperCamelCase_ ( self : Union[str, Any] , UpperCamelCase : List[Any] , UpperCamelCase : Any , UpperCamelCase : Union[str, Any] , UpperCamelCase : Optional[int]=False , UpperCamelCase : int=2 , UpperCamelCase : List[Any]=False ):
'''simple docstring'''
random.seed(UpperCamelCase )
_snake_case : Optional[Any] = list(UpperCamelCase )
_snake_case : Any = [
IterableDatasetShard(
UpperCamelCase , batch_size=UpperCamelCase , drop_last=UpperCamelCase , num_processes=UpperCamelCase , process_index=UpperCamelCase , split_batches=UpperCamelCase , )
for i in range(UpperCamelCase )
]
_snake_case : Union[str, Any] = []
for iterable_dataset_shard in iterable_dataset_shards:
# Since our random iterable dataset will be... random... we need to use a seed to get reproducible results.
random.seed(UpperCamelCase )
iterable_dataset_lists.append(list(UpperCamelCase ) )
_snake_case : str = batch_size // num_processes if split_batches else batch_size
# All iterable dataset shards should have the same length, a round multiple of shard_batch_size
_snake_case : Optional[Any] = iterable_dataset_lists[0]
for l in iterable_dataset_lists[1:]:
self.assertEqual(len(UpperCamelCase ) , len(UpperCamelCase ) )
self.assertTrue(len(UpperCamelCase ) % shard_batch_size == 0 )
_snake_case : Tuple = []
for idx in range(0 , len(UpperCamelCase ) , UpperCamelCase ):
for l in iterable_dataset_lists:
observed += l[idx : idx + shard_batch_size]
if not drop_last:
while len(UpperCamelCase ) < len(UpperCamelCase ):
reference += reference
self.assertListEqual(UpperCamelCase , reference[: len(UpperCamelCase )] )
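# Added sketch (illustrative, not part of the original test): the reference stream is
# rebuilt by interleaving one batch from each shard in turn. For 2 processes and a
# shard batch size of 2:
#     shards = [[0, 1, 4, 5], [2, 3, 6, 7]]
#     observed = []
#     for idx in range(0, 4, 2):
#         for l in shards:
#             observed += l[idx : idx + 2]
#     assert observed == [0, 1, 2, 3, 4, 5, 6, 7]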
def UpperCamelCase_ ( self : List[str] ):
'''simple docstring'''
_snake_case : List[str] = 42
_snake_case : List[Any] = RandomIterableDataset()
self.check_iterable_dataset_shards(UpperCamelCase , UpperCamelCase , batch_size=4 , drop_last=UpperCamelCase , split_batches=UpperCamelCase )
self.check_iterable_dataset_shards(UpperCamelCase , UpperCamelCase , batch_size=4 , drop_last=UpperCamelCase , split_batches=UpperCamelCase )
self.check_iterable_dataset_shards(UpperCamelCase , UpperCamelCase , batch_size=4 , drop_last=UpperCamelCase , split_batches=UpperCamelCase )
self.check_iterable_dataset_shards(UpperCamelCase , UpperCamelCase , batch_size=4 , drop_last=UpperCamelCase , split_batches=UpperCamelCase )
# Edge case with a very small dataset
_snake_case : Optional[Any] = RandomIterableDataset(max_length=2 )
self.check_iterable_dataset_shards(UpperCamelCase , UpperCamelCase , batch_size=4 , drop_last=UpperCamelCase , split_batches=UpperCamelCase )
self.check_iterable_dataset_shards(UpperCamelCase , UpperCamelCase , batch_size=4 , drop_last=UpperCamelCase , split_batches=UpperCamelCase )
self.check_iterable_dataset_shards(UpperCamelCase , UpperCamelCase , batch_size=4 , drop_last=UpperCamelCase , split_batches=UpperCamelCase )
self.check_iterable_dataset_shards(UpperCamelCase , UpperCamelCase , batch_size=4 , drop_last=UpperCamelCase , split_batches=UpperCamelCase )
def UpperCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
_snake_case : Optional[int] = BatchSampler(range(16 ) , batch_size=4 , drop_last=UpperCamelCase )
_snake_case : int = SkipBatchSampler(UpperCamelCase , 2 )
self.assertListEqual(list(UpperCamelCase ) , [[8, 9, 10, 11], [12, 13, 14, 15]] )
def UpperCamelCase_ ( self : Dict ):
'''simple docstring'''
_snake_case : Optional[int] = SkipDataLoader(list(range(16 ) ) , batch_size=4 , skip_batches=2 )
self.assertListEqual([t.tolist() for t in dataloader] , [[8, 9, 10, 11], [12, 13, 14, 15]] )
def UpperCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
_snake_case : Optional[Any] = DataLoader(list(range(16 ) ) , batch_size=4 )
_snake_case : int = skip_first_batches(UpperCamelCase , num_batches=2 )
self.assertListEqual([t.tolist() for t in new_dataloader] , [[8, 9, 10, 11], [12, 13, 14, 15]] )
def UpperCamelCase_ ( self : Tuple ):
'''simple docstring'''
_snake_case : Optional[int] = DataLoaderShard(list(range(16 ) ) , batch_size=4 )
for idx, _ in enumerate(UpperCamelCase ):
self.assertEqual(dataloader.end_of_dataloader , idx == 3 )
# Test it also works on the second iteration
for idx, _ in enumerate(UpperCamelCase ):
self.assertEqual(dataloader.end_of_dataloader , idx == 3 )
def UpperCamelCase_ ( self : Any ):
'''simple docstring'''
Accelerator()
_snake_case : Tuple = DataLoaderDispatcher(range(16 ) , batch_size=4 )
for idx, _ in enumerate(UpperCamelCase ):
self.assertEqual(dataloader.end_of_dataloader , idx == 3 )
# Test it also works on the second iteration
for idx, _ in enumerate(UpperCamelCase ):
self.assertEqual(dataloader.end_of_dataloader , idx == 3 )
| 669 |
from unittest import TestCase
from datasets import Dataset
from minhash_deduplication import deduplicate_dataset, make_duplicate_clusters
def lowerCamelCase_ ( )-> Any:
_snake_case : List[str] = {
'repo_name': ['test_repo1', 'test_repo2', 'test_repo3'],
'path': ['test_1.py', 'test_2.py', 'unit_test.py'],
'content': ['a ' * 20, 'a ' * 30, 'b ' * 7],
}
_snake_case : Optional[Any] = Dataset.from_dict(lowerCAmelCase )
return dataset
class _lowerCAmelCase ( UpperCAmelCase_ ):
'''simple docstring'''
def UpperCamelCase_ ( self : Dict ):
'''simple docstring'''
_snake_case : Union[str, Any] = get_dataset()
_snake_case : Tuple = make_duplicate_clusters(UpperCamelCase , 0.85 )
self.assertEqual(len(duplicate_clusters[0] ) , 2 )
def UpperCamelCase_ ( self : Dict ):
'''simple docstring'''
_snake_case : List[str] = get_dataset()
_snake_case , _snake_case : str = deduplicate_dataset(UpperCamelCase )
self.assertEqual(len(UpperCamelCase ) , 2 )
print(UpperCamelCase )
self.assertEqual(duplicate_clusters[0][0]['copies'] , 2 )
self.assertEqual(duplicate_clusters[0][0]['is_extreme'] , UpperCamelCase )
| 669 | 1 |
def lowerCamelCase_ ( lowerCAmelCase: str )-> int:
hex_num = lowerCAmelCase.strip()
if not hex_num:
raise ValueError('No value was passed to the function' )
is_negative = hex_num[0] == '-'
if is_negative:
hex_num = hex_num[1:]
try:
int_num = int(hex_num , 16 )
except ValueError:
raise ValueError('Invalid value was passed to the function' )
bin_str = ''
while int_num > 0:
bin_str = str(int_num % 2 ) + bin_str
int_num >>= 1
return int(('-' + bin_str) if is_negative else bin_str )
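# Added usage note (illustrative): the function returns the binary digits as an int,
# e.g. 'AC' -> 10101100 (0xAC == 0b10101100) and '-1A' -> -11010 (0x1A == 0b11010).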
if __name__ == "__main__":
import doctest
doctest.testmod()
| 669 |
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class _lowerCAmelCase ( UpperCAmelCase_ ):
'''simple docstring'''
a_ : Union[str, Any] =["""image_processor""", """tokenizer"""]
a_ : Optional[int] ="""CLIPImageProcessor"""
a_ : Optional[Any] =("""XLMRobertaTokenizer""", """XLMRobertaTokenizerFast""")
def __init__( self : List[str] , UpperCamelCase : Optional[int]=None , UpperCamelCase : Optional[Any]=None , **UpperCamelCase : Dict ):
'''simple docstring'''
_snake_case : int = None
if "feature_extractor" in kwargs:
warnings.warn(
'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
' instead.' , UpperCamelCase , )
_snake_case : Optional[Any] = kwargs.pop('feature_extractor' )
_snake_case : Dict = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('You need to specify an `image_processor`.' )
if tokenizer is None:
raise ValueError('You need to specify a `tokenizer`.' )
super().__init__(UpperCamelCase , UpperCamelCase )
def __call__( self : Dict , UpperCamelCase : Optional[Any]=None , UpperCamelCase : Optional[Any]=None , UpperCamelCase : Optional[int]=None , **UpperCamelCase : Dict ):
'''simple docstring'''
if text is None and images is None:
raise ValueError('You have to specify either text or images. Both cannot be none.' )
if text is not None:
_snake_case : Optional[int] = self.tokenizer(UpperCamelCase , return_tensors=UpperCamelCase , **UpperCamelCase )
if images is not None:
_snake_case : Optional[int] = self.image_processor(UpperCamelCase , return_tensors=UpperCamelCase , **UpperCamelCase )
if text is not None and images is not None:
_snake_case : Optional[int] = image_features.pixel_values
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**UpperCamelCase ) , tensor_type=UpperCamelCase )
def UpperCamelCase_ ( self : Union[str, Any] , *UpperCamelCase : Any , **UpperCamelCase : Union[str, Any] ):
'''simple docstring'''
return self.tokenizer.batch_decode(*UpperCamelCase , **UpperCamelCase )
def UpperCamelCase_ ( self : Union[str, Any] , *UpperCamelCase : Union[str, Any] , **UpperCamelCase : Optional[Any] ):
'''simple docstring'''
return self.tokenizer.decode(*UpperCamelCase , **UpperCamelCase )
@property
def UpperCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
_snake_case : Any = self.tokenizer.model_input_names
_snake_case : List[Any] = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
| 669 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase_ = logging.get_logger(__name__)
class _lowerCAmelCase ( UpperCAmelCase_ ):
'''simple docstring'''
a_ : Any ="""timm_backbone"""
def __init__( self : Optional[int] , UpperCamelCase : Dict=None , UpperCamelCase : Union[str, Any]=3 , UpperCamelCase : Dict=True , UpperCamelCase : List[str]=True , UpperCamelCase : Optional[Any]=None , **UpperCamelCase : Dict , ):
'''simple docstring'''
super().__init__(**UpperCamelCase )
_snake_case : str = backbone
_snake_case : str = num_channels
_snake_case : Optional[Any] = features_only
_snake_case : List[Any] = use_pretrained_backbone
_snake_case : Union[str, Any] = True
_snake_case : Any = out_indices if out_indices is not None else (-1,)
| 669 |
import json
import os
from pathlib import Path
import pytest
from datasets.download.download_config import DownloadConfig
from datasets.download.download_manager import DownloadManager
from datasets.utils.file_utils import hash_url_to_filename
lowerCAmelCase_ = """http://www.mocksite.com/file1.txt"""
lowerCAmelCase_ = """\"text\": [\"foo\", \"foo\"]"""
lowerCAmelCase_ = """6d8ce9aa78a471c7477201efbeabd3bb01ac2e7d100a6dc024ba1608361f90a8"""
class _lowerCAmelCase :
'''simple docstring'''
a_ : int =200
a_ : List[str] ={"""Content-Length""": """100"""}
a_ : Tuple ={}
def UpperCamelCase_ ( self : Any , **UpperCamelCase : Any ):
'''simple docstring'''
return [bytes(UpperCamelCase , 'utf-8' )]
def lowerCamelCase_ ( *lowerCAmelCase: Tuple , **lowerCAmelCase: Tuple )-> str:
return MockResponse()
@pytest.mark.parametrize('urls_type' , [str, list, dict] )
def lowerCamelCase_ ( lowerCAmelCase: Union[str, Any] , lowerCAmelCase: Optional[Any] , lowerCAmelCase: Dict )-> Optional[Any]:
import requests
monkeypatch.setattr(lowerCAmelCase , 'request' , lowerCAmelCase )
_snake_case : List[str] = URL
if issubclass(lowerCAmelCase , lowerCAmelCase ):
_snake_case : Optional[int] = url
elif issubclass(lowerCAmelCase , lowerCAmelCase ):
_snake_case : Any = [url]
elif issubclass(lowerCAmelCase , lowerCAmelCase ):
_snake_case : Optional[Any] = {'train': url}
_snake_case : int = 'dummy'
_snake_case : Optional[Any] = 'downloads'
_snake_case : Union[str, Any] = tmp_path
_snake_case : Dict = DownloadConfig(
cache_dir=os.path.join(lowerCAmelCase , lowerCAmelCase ) , use_etag=lowerCAmelCase , )
_snake_case : str = DownloadManager(dataset_name=lowerCAmelCase , download_config=lowerCAmelCase )
_snake_case : Optional[int] = dl_manager.download(lowerCAmelCase )
_snake_case : Tuple = urls
for downloaded_paths in [downloaded_paths]:
if isinstance(lowerCAmelCase , lowerCAmelCase ):
_snake_case : Optional[Any] = [downloaded_paths]
_snake_case : List[str] = [urls]
elif isinstance(lowerCAmelCase , lowerCAmelCase ):
assert "train" in downloaded_paths.keys()
_snake_case : Any = downloaded_paths.values()
_snake_case : List[str] = urls.values()
assert downloaded_paths
for downloaded_path, input_url in zip(lowerCAmelCase , lowerCAmelCase ):
assert downloaded_path == dl_manager.downloaded_paths[input_url]
_snake_case : str = Path(lowerCAmelCase )
_snake_case : int = downloaded_path.parts
assert parts[-1] == HASH
assert parts[-2] == cache_subdir
assert downloaded_path.exists()
_snake_case : List[str] = downloaded_path.read_text()
assert content == CONTENT
_snake_case : Any = downloaded_path.with_suffix('.json' )
assert metadata_downloaded_path.exists()
_snake_case : Tuple = json.loads(metadata_downloaded_path.read_text() )
assert metadata_content == {"url": URL, "etag": None}
@pytest.mark.parametrize('paths_type' , [str, list, dict] )
def lowerCamelCase_ ( lowerCAmelCase: Optional[Any] , lowerCAmelCase: Optional[int] , lowerCAmelCase: Any )-> str:
_snake_case : str = str(lowerCAmelCase )
if issubclass(lowerCAmelCase , lowerCAmelCase ):
_snake_case : str = filename
elif issubclass(lowerCAmelCase , lowerCAmelCase ):
_snake_case : List[Any] = [filename]
elif issubclass(lowerCAmelCase , lowerCAmelCase ):
_snake_case : Optional[Any] = {'train': filename}
_snake_case : Any = 'dummy'
_snake_case : Union[str, Any] = xz_file.parent
_snake_case : int = 'extracted'
_snake_case : Union[str, Any] = DownloadConfig(
cache_dir=lowerCAmelCase , use_etag=lowerCAmelCase , )
_snake_case : List[str] = DownloadManager(dataset_name=lowerCAmelCase , download_config=lowerCAmelCase )
_snake_case : Dict = dl_manager.extract(lowerCAmelCase )
_snake_case : Optional[int] = paths
for extracted_paths in [extracted_paths]:
if isinstance(lowerCAmelCase , lowerCAmelCase ):
_snake_case : List[str] = [extracted_paths]
_snake_case : int = [paths]
elif isinstance(lowerCAmelCase , lowerCAmelCase ):
assert "train" in extracted_paths.keys()
_snake_case : Optional[int] = extracted_paths.values()
_snake_case : str = paths.values()
assert extracted_paths
for extracted_path, input_path in zip(lowerCAmelCase , lowerCAmelCase ):
assert extracted_path == dl_manager.extracted_paths[input_path]
_snake_case : List[str] = Path(lowerCAmelCase )
_snake_case : Optional[Any] = extracted_path.parts
assert parts[-1] == hash_url_to_filename(lowerCAmelCase , etag=lowerCAmelCase )
assert parts[-2] == extracted_subdir
assert extracted_path.exists()
_snake_case : Optional[int] = extracted_path.read_text()
_snake_case : int = text_file.read_text()
assert extracted_file_content == expected_file_content
def lowerCamelCase_ ( lowerCAmelCase: Optional[Any] , lowerCAmelCase: List[Any] )-> Dict:
assert path.endswith('.jsonl' )
for num_items, line in enumerate(lowerCAmelCase , start=1 ):
_snake_case : Dict = json.loads(line.decode('utf-8' ) )
assert item.keys() == {"col_1", "col_2", "col_3"}
assert num_items == 4
@pytest.mark.parametrize('archive_jsonl' , ['tar_jsonl_path', 'zip_jsonl_path'] )
def lowerCamelCase_ ( lowerCAmelCase: List[Any] , lowerCAmelCase: List[str] )-> Dict:
_snake_case : List[str] = request.getfixturevalue(lowerCAmelCase )
_snake_case : Optional[Any] = DownloadManager()
for num_jsonl, (path, file) in enumerate(dl_manager.iter_archive(lowerCAmelCase ) , start=1 ):
_test_jsonl(lowerCAmelCase , lowerCAmelCase )
assert num_jsonl == 2
@pytest.mark.parametrize('archive_nested_jsonl' , ['tar_nested_jsonl_path', 'zip_nested_jsonl_path'] )
def lowerCamelCase_ ( lowerCAmelCase: List[Any] , lowerCAmelCase: int )-> str:
_snake_case : List[Any] = request.getfixturevalue(lowerCAmelCase )
_snake_case : Optional[int] = DownloadManager()
for num_tar, (path, file) in enumerate(dl_manager.iter_archive(lowerCAmelCase ) , start=1 ):
for num_jsonl, (subpath, subfile) in enumerate(dl_manager.iter_archive(lowerCAmelCase ) , start=1 ):
_test_jsonl(lowerCAmelCase , lowerCAmelCase )
assert num_tar == 1
assert num_jsonl == 2
def lowerCamelCase_ ( lowerCAmelCase: Any )-> int:
_snake_case : Tuple = DownloadManager()
for num_file, file in enumerate(dl_manager.iter_files(lowerCAmelCase ) , start=1 ):
assert os.path.basename(lowerCAmelCase ) == ("test.txt" if num_file == 1 else "train.txt")
assert num_file == 2
| 669 | 1 |
import argparse
import json
from typing import List
from ltp import LTP
from transformers import BertTokenizer
def _is_chinese_char( cp: int )-> bool:
# This defines a "chinese character" as anything in the CJK Unicode block:
# https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
#
# Note that the CJK Unicode block is NOT all Japanese and Korean characters,
# despite its name. The modern Korean Hangul alphabet is a different block,
# as are Japanese Hiragana and Katakana. Those alphabets are used to write
# space-separated words, so they are not treated specially and are handled
# like all of the other languages.
if (
(cp >= 0x4_E00 and cp <= 0x9_FFF)
or (cp >= 0x3_400 and cp <= 0x4_DBF) #
or (cp >= 0x20_000 and cp <= 0x2A_6DF) #
or (cp >= 0x2A_700 and cp <= 0x2B_73F) #
or (cp >= 0x2B_740 and cp <= 0x2B_81F) #
or (cp >= 0x2B_820 and cp <= 0x2C_EAF) #
or (cp >= 0xF_900 and cp <= 0xF_AFF)
or (cp >= 0x2F_800 and cp <= 0x2F_A1F) #
): #
return True
return False
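# Added illustrative note (not in the original script): the range checks above accept
# CJK ideographs and reject kana/Hangul, e.g. _is_chinese_char(0x4E2D) ('中') -> True,
# while 0x3042 ('あ', Hiragana) and 0xD55C ('한', Hangul) -> False.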
def is_chinese( word: str )-> int:
# Return 1 if the word consists entirely of Chinese characters (e.g. '身高' or '神'), else 0 (e.g. '180')
for char in word:
cp = ord(char )
if not _is_chinese_char(cp ):
return 0
return 1
def get_chinese_word( tokens: List[str] )-> List[str]:
word_set = set()
for token in tokens:
chinese_word = len(token ) > 1 and is_chinese(token )
if chinese_word:
word_set.add(token )
word_list = list(word_set )
return word_list
def add_sub_symbol( bert_tokens: List[str] , chinese_word_set: set )-> List[str]:
if not chinese_word_set:
return bert_tokens
max_word_len = max([len(w ) for w in chinese_word_set] )
bert_word = bert_tokens
start , end = 0, len(bert_word )
while start < end:
single_word = True
if is_chinese(bert_word[start] ):
l = min(end - start , max_word_len )
for i in range(l , 1 , -1 ):
whole_word = ''.join(bert_word[start : start + i] )
if whole_word in chinese_word_set:
for j in range(start + 1 , start + i ):
bert_word[j] = '##' + bert_word[j]
start = start + i
single_word = False
break
if single_word:
start += 1
return bert_word
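# Added worked example (illustrative, not part of the original script): with
# bert_tokens = ['你', '好', '吗'] and chinese_word_set = {'你好'}, add_sub_symbol returns
# ['你', '##好', '吗'] - the '##' prefix marks continuation pieces of a matched whole
# word, which is what whole-word masking needs downstream.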
def prepare_ref( lines: List[str] , ltp_tokenizer: LTP , bert_tokenizer: BertTokenizer )-> List[str]:
ltp_res = []
for i in range(0 , len(lines ) , 1_00 ):
res = ltp_tokenizer.seg(lines[i : i + 1_00] )[0]
res = [get_chinese_word(r ) for r in res]
ltp_res.extend(res )
assert len(ltp_res ) == len(lines )
bert_res = []
for i in range(0 , len(lines ) , 1_00 ):
res = bert_tokenizer(lines[i : i + 1_00] , add_special_tokens=True , truncation=True , max_length=5_12 )
bert_res.extend(res['input_ids'] )
assert len(bert_res ) == len(lines )
ref_ids = []
for input_ids, chinese_word in zip(bert_res , ltp_res ):
input_tokens = []
for id in input_ids:
token = bert_tokenizer._convert_id_to_token(id )
input_tokens.append(token )
input_tokens = add_sub_symbol(input_tokens , chinese_word )
ref_id = []
# We only save the positions of Chinese subwords that start with ##, which means they are part of a whole word.
for i, token in enumerate(input_tokens ):
if token[:2] == "##":
clean_token = token[2:]
# save the Chinese tokens' positions
if len(clean_token ) == 1 and _is_chinese_char(ord(clean_token ) ):
ref_id.append(i )
ref_ids.append(ref_id )
assert len(ref_ids ) == len(bert_res )
return ref_ids
def main( args )-> None:
# For Chinese (Ro)BERT, the best results come from RoBERTa-wwm-ext (https://github.com/ymcui/Chinese-BERT-wwm).
# If we want to fine-tune these models, we have to use the same tokenizer: LTP (https://github.com/HIT-SCIR/ltp).
with open(args.file_name , 'r' , encoding='utf-8' ) as f:
data = f.readlines()
data = [line.strip() for line in data if len(line ) > 0 and not line.isspace()] # avoid delimiter like '\u2029'
ltp_tokenizer = LTP(args.ltp ) # faster on a GPU device
bert_tokenizer = BertTokenizer.from_pretrained(args.bert )
ref_ids = prepare_ref(data , ltp_tokenizer , bert_tokenizer )
with open(args.save_path , 'w' , encoding='utf-8' ) as f:
data = [json.dumps(ref ) + '\n' for ref in ref_ids]
f.writelines(data )
if __name__ == "__main__":
lowerCAmelCase_ = argparse.ArgumentParser(description="""prepare_chinese_ref""")
parser.add_argument(
"""--file_name""",
type=str,
default="""./resources/chinese-demo.txt""",
help="""file need process, same as training data in lm""",
)
parser.add_argument(
"""--ltp""", type=str, default="""./resources/ltp""", help="""resources for LTP tokenizer, usually a path"""
)
parser.add_argument("""--bert""", type=str, default="""./resources/robert""", help="""resources for Bert tokenizer""")
parser.add_argument("""--save_path""", type=str, default="""./resources/ref.txt""", help="""path to save res""")
lowerCAmelCase_ = parser.parse_args()
main(args)
| 669 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCAmelCase_ = logging.get_logger(__name__)
lowerCAmelCase_ = {
"""roberta-base""": """https://huggingface.co/roberta-base/resolve/main/config.json""",
"""roberta-large""": """https://huggingface.co/roberta-large/resolve/main/config.json""",
"""roberta-large-mnli""": """https://huggingface.co/roberta-large-mnli/resolve/main/config.json""",
"""distilroberta-base""": """https://huggingface.co/distilroberta-base/resolve/main/config.json""",
"""roberta-base-openai-detector""": """https://huggingface.co/roberta-base-openai-detector/resolve/main/config.json""",
"""roberta-large-openai-detector""": """https://huggingface.co/roberta-large-openai-detector/resolve/main/config.json""",
}
class _lowerCAmelCase ( UpperCAmelCase_ ):
'''simple docstring'''
a_ : int ="""roberta"""
def __init__( self : int , UpperCamelCase : Tuple=5_02_65 , UpperCamelCase : Any=7_68 , UpperCamelCase : List[Any]=12 , UpperCamelCase : str=12 , UpperCamelCase : Dict=30_72 , UpperCamelCase : Any="gelu" , UpperCamelCase : List[Any]=0.1 , UpperCamelCase : Optional[Any]=0.1 , UpperCamelCase : Optional[Any]=5_12 , UpperCamelCase : List[str]=2 , UpperCamelCase : Optional[Any]=0.02 , UpperCamelCase : Tuple=1e-1_2 , UpperCamelCase : str=1 , UpperCamelCase : int=0 , UpperCamelCase : Any=2 , UpperCamelCase : int="absolute" , UpperCamelCase : int=True , UpperCamelCase : List[Any]=None , **UpperCamelCase : Any , ):
'''simple docstring'''
super().__init__(pad_token_id=UpperCamelCase , bos_token_id=UpperCamelCase , eos_token_id=UpperCamelCase , **UpperCamelCase )
_snake_case : Any = vocab_size
_snake_case : List[str] = hidden_size
_snake_case : List[str] = num_hidden_layers
_snake_case : Dict = num_attention_heads
_snake_case : List[str] = hidden_act
_snake_case : Union[str, Any] = intermediate_size
_snake_case : Union[str, Any] = hidden_dropout_prob
_snake_case : Optional[int] = attention_probs_dropout_prob
_snake_case : Dict = max_position_embeddings
_snake_case : Optional[int] = type_vocab_size
_snake_case : Tuple = initializer_range
_snake_case : int = layer_norm_eps
_snake_case : Dict = position_embedding_type
_snake_case : Union[str, Any] = use_cache
_snake_case : str = classifier_dropout
class _lowerCAmelCase ( UpperCAmelCase_ ):
'''simple docstring'''
@property
def UpperCamelCase_ ( self : Dict ):
'''simple docstring'''
if self.task == "multiple-choice":
_snake_case : Optional[Any] = {0: 'batch', 1: 'choice', 2: 'sequence'}
else:
_snake_case : Dict = {0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
] )
| 669 | 1 |
import torch
from diffusers import CMStochasticIterativeScheduler
from .test_schedulers import SchedulerCommonTest
class _lowerCAmelCase ( UpperCAmelCase_ ):
'''simple docstring'''
a_ : Optional[int] =(CMStochasticIterativeScheduler,)
a_ : Any =10
def UpperCamelCase_ ( self : Union[str, Any] , **UpperCamelCase : Any ):
'''simple docstring'''
_snake_case : Optional[Any] = {
'num_train_timesteps': 2_01,
'sigma_min': 0.0_02,
'sigma_max': 80.0,
}
config.update(**UpperCamelCase )
return config
def UpperCamelCase_ ( self : str ):
'''simple docstring'''
_snake_case : List[str] = 10
_snake_case : Tuple = self.get_scheduler_config()
_snake_case : Optional[Any] = self.scheduler_classes[0](**UpperCamelCase )
scheduler.set_timesteps(UpperCamelCase )
_snake_case : List[str] = scheduler.timesteps[0]
_snake_case : Union[str, Any] = scheduler.timesteps[1]
_snake_case : Any = self.dummy_sample
_snake_case : List[str] = 0.1 * sample
_snake_case : Tuple = scheduler.step(UpperCamelCase , UpperCamelCase , UpperCamelCase ).prev_sample
_snake_case : Union[str, Any] = scheduler.step(UpperCamelCase , UpperCamelCase , UpperCamelCase ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
def UpperCamelCase_ ( self : List[str] ):
'''simple docstring'''
for timesteps in [10, 50, 1_00, 10_00]:
self.check_over_configs(num_train_timesteps=UpperCamelCase )
def UpperCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
for clip_denoised in [True, False]:
self.check_over_configs(clip_denoised=UpperCamelCase )
def UpperCamelCase_ ( self : Dict ):
'''simple docstring'''
_snake_case : Optional[int] = self.scheduler_classes[0]
_snake_case : Dict = self.get_scheduler_config()
_snake_case : List[Any] = scheduler_class(**UpperCamelCase )
_snake_case : Tuple = 1
scheduler.set_timesteps(UpperCamelCase )
_snake_case : Any = scheduler.timesteps
_snake_case : Dict = torch.manual_seed(0 )
_snake_case : Optional[int] = self.dummy_model()
_snake_case : Any = self.dummy_sample_deter * scheduler.init_noise_sigma
for i, t in enumerate(UpperCamelCase ):
# 1. scale model input
_snake_case : Optional[int] = scheduler.scale_model_input(UpperCamelCase , UpperCamelCase )
# 2. predict noise residual
_snake_case : Optional[Any] = model(UpperCamelCase , UpperCamelCase )
# 3. predict previous sample x_t-1
_snake_case : Any = scheduler.step(UpperCamelCase , UpperCamelCase , UpperCamelCase , generator=UpperCamelCase ).prev_sample
_snake_case : Tuple = pred_prev_sample
_snake_case : List[str] = torch.sum(torch.abs(UpperCamelCase ) )
_snake_case : Optional[Any] = torch.mean(torch.abs(UpperCamelCase ) )
assert abs(result_sum.item() - 1_92.76_14 ) < 1e-2
assert abs(result_mean.item() - 0.25_10 ) < 1e-3
def UpperCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
_snake_case : str = self.scheduler_classes[0]
_snake_case : str = self.get_scheduler_config()
_snake_case : Tuple = scheduler_class(**UpperCamelCase )
_snake_case : Optional[int] = [1_06, 0]
scheduler.set_timesteps(timesteps=UpperCamelCase )
_snake_case : Optional[Any] = scheduler.timesteps
_snake_case : Any = torch.manual_seed(0 )
_snake_case : Union[str, Any] = self.dummy_model()
_snake_case : Dict = self.dummy_sample_deter * scheduler.init_noise_sigma
for t in timesteps:
# 1. scale model input
_snake_case : int = scheduler.scale_model_input(UpperCamelCase , UpperCamelCase )
# 2. predict noise residual
_snake_case : Union[str, Any] = model(UpperCamelCase , UpperCamelCase )
# 3. predict previous sample x_t-1
_snake_case : Optional[int] = scheduler.step(UpperCamelCase , UpperCamelCase , UpperCamelCase , generator=UpperCamelCase ).prev_sample
_snake_case : Union[str, Any] = pred_prev_sample
_snake_case : Union[str, Any] = torch.sum(torch.abs(UpperCamelCase ) )
_snake_case : Tuple = torch.mean(torch.abs(UpperCamelCase ) )
assert abs(result_sum.item() - 3_47.63_57 ) < 1e-2
assert abs(result_mean.item() - 0.45_27 ) < 1e-3
def UpperCamelCase_ ( self : List[Any] ):
'''simple docstring'''
_snake_case : Any = self.scheduler_classes[0]
_snake_case : Optional[Any] = self.get_scheduler_config()
_snake_case : Optional[int] = scheduler_class(**UpperCamelCase )
_snake_case : Optional[Any] = [39, 30, 12, 15, 0]
with self.assertRaises(UpperCamelCase , msg='`timesteps` must be in descending order.' ):
scheduler.set_timesteps(timesteps=UpperCamelCase )
def UpperCamelCase_ ( self : Tuple ):
'''simple docstring'''
_snake_case : Optional[int] = self.scheduler_classes[0]
_snake_case : List[Any] = self.get_scheduler_config()
_snake_case : int = scheduler_class(**UpperCamelCase )
_snake_case : Dict = [39, 30, 12, 1, 0]
_snake_case : Union[str, Any] = len(UpperCamelCase )
with self.assertRaises(UpperCamelCase , msg='Can only pass one of `num_inference_steps` or `timesteps`.' ):
scheduler.set_timesteps(num_inference_steps=UpperCamelCase , timesteps=UpperCamelCase )
def UpperCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
_snake_case : Dict = self.scheduler_classes[0]
_snake_case : str = self.get_scheduler_config()
_snake_case : int = scheduler_class(**UpperCamelCase )
_snake_case : Dict = [scheduler.config.num_train_timesteps]
with self.assertRaises(
UpperCamelCase , msg=f"""`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}""" , ):
scheduler.set_timesteps(timesteps=UpperCamelCase )
| 669 |
from random import randint, random
def construct_highway( number_of_cells: int , frequency: int , initial_speed: int , random_frequency: bool = False , random_speed: bool = False , max_speed: int = 5 , )-> list:
highway = [[-1] * number_of_cells] # Create a highway without any cars
i = 0
initial_speed = max(initial_speed , 0 )
while i < number_of_cells:
highway[0][i] = (
randint(0 , max_speed ) if random_speed else initial_speed
) # Place the cars
i += (
randint(1 , max_speed * 2 ) if random_frequency else frequency
) # Arbitrary number, may need tuning
return highway
def get_distance( highway_now: list , car_index: int )-> int:
distance = 0
cells = highway_now[car_index + 1 :]
for cell in range(len(cells ) ): # May need a better name for this
if cells[cell] != -1: # If the cell is not empty then
return distance # we have the distance we wanted
distance += 1
# The car is near the end of the highway: wrap around to the start and keep counting
return distance + get_distance(highway_now , -1 )
def update( highway_now: list , probability: float , max_speed: int )-> list:
number_of_cells = len(highway_now )
# Before calculations, the next highway is empty
next_highway = [-1] * number_of_cells
for car_index in range(number_of_cells ):
if highway_now[car_index] != -1:
# Add 1 to the current speed of the car and cap the speed
next_highway[car_index] = min(highway_now[car_index] + 1 , max_speed )
# Number of empty cells before the next car
dn = get_distance(highway_now , car_index ) - 1
# We can't have the car causing an accident
next_highway[car_index] = min(next_highway[car_index] , dn )
if random() < probability:
# Randomly, a driver will slow down
next_highway[car_index] = max(next_highway[car_index] - 1 , 0 )
return next_highway
def simulate( highway: list , number_of_update: int , probability: float , max_speed: int )-> list:
number_of_cells = len(highway[0] )
for i in range(number_of_update ):
next_speeds_calculated = update(highway[i] , probability , max_speed )
real_next_speeds = [-1] * number_of_cells
for car_index in range(number_of_cells ):
speed = next_speeds_calculated[car_index]
if speed != -1:
# Change the position based on the speed (with % to create the loop)
index = (car_index + speed) % number_of_cells
# Commit the change of position
real_next_speeds[index] = speed
highway.append(real_next_speeds )
return highway
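# Added worked step (illustrative) for the update rules above: with max_speed=5 and
# probability=0, a car at speed 2 with three empty cells ahead first accelerates to 3,
# is then capped by the gap (dn = 3 - 1 = 2) to speed 2, and advances two cells - so
# cars always stop short of the car in front, and jams emerge only from the random
# slowdowns.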
if __name__ == "__main__":
import doctest
doctest.testmod()
| 669 | 1 |
import re
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
class _lowerCAmelCase ( UpperCAmelCase_ ):
'''simple docstring'''
a_ : Dict =["""image_processor""", """tokenizer"""]
a_ : Dict ="""AutoImageProcessor"""
a_ : str ="""AutoTokenizer"""
def __init__( self : Optional[Any] , UpperCamelCase : List[str]=None , UpperCamelCase : int=None , **UpperCamelCase : Any ):
'''simple docstring'''
_snake_case : Any = None
if "feature_extractor" in kwargs:
warnings.warn(
'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
' instead.' , UpperCamelCase , )
_snake_case : int = kwargs.pop('feature_extractor' )
_snake_case : Tuple = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('You need to specify an `image_processor`.' )
if tokenizer is None:
raise ValueError('You need to specify a `tokenizer`.' )
super().__init__(UpperCamelCase , UpperCamelCase )
_snake_case : Tuple = self.image_processor
_snake_case : Optional[Any] = False
def __call__( self : str , *UpperCamelCase : Tuple , **UpperCamelCase : Optional[Any] ):
'''simple docstring'''
if self._in_target_context_manager:
return self.current_processor(*UpperCamelCase , **UpperCamelCase )
_snake_case : str = kwargs.pop('images' , UpperCamelCase )
_snake_case : Optional[int] = kwargs.pop('text' , UpperCamelCase )
if len(UpperCamelCase ) > 0:
_snake_case : Optional[Any] = args[0]
_snake_case : Dict = args[1:]
if images is None and text is None:
raise ValueError('You need to specify either an `images` or `text` input to process.' )
if images is not None:
_snake_case : Dict = self.image_processor(UpperCamelCase , *UpperCamelCase , **UpperCamelCase )
if text is not None:
_snake_case : Optional[Any] = self.tokenizer(UpperCamelCase , **UpperCamelCase )
if text is None:
return inputs
elif images is None:
return encodings
else:
_snake_case : Optional[Any] = encodings['input_ids']
return inputs
def UpperCamelCase_ ( self : Union[str, Any] , *UpperCamelCase : List[str] , **UpperCamelCase : Tuple ):
'''simple docstring'''
return self.tokenizer.batch_decode(*UpperCamelCase , **UpperCamelCase )
def UpperCamelCase_ ( self : List[str] , *UpperCamelCase : Any , **UpperCamelCase : Optional[Any] ):
'''simple docstring'''
return self.tokenizer.decode(*UpperCamelCase , **UpperCamelCase )
@contextmanager
def UpperCamelCase_ ( self : Any ):
'''simple docstring'''
warnings.warn(
'`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your '
'labels by using the argument `text` of the regular `__call__` method (either in the same call as '
'your images inputs, or in a separate call.' )
_snake_case : Any = True
_snake_case : List[str] = self.tokenizer
yield
_snake_case : Optional[int] = self.image_processor
_snake_case : Optional[int] = False
def UpperCamelCase_ ( self : List[str] , UpperCamelCase : Tuple , UpperCamelCase : List[str]=False , UpperCamelCase : Tuple=None ):
'''simple docstring'''
if added_vocab is None:
_snake_case : int = self.tokenizer.get_added_vocab()
_snake_case : Optional[int] = {}
while tokens:
_snake_case : Optional[Any] = re.search(R'<s_(.*?)>' , UpperCamelCase , re.IGNORECASE )
if start_token is None:
break
_snake_case : Tuple = start_token.group(1 )
_snake_case : str = re.search(Rf"""</s_{key}>""" , UpperCamelCase , re.IGNORECASE )
_snake_case : int = start_token.group()
if end_token is None:
_snake_case : str = tokens.replace(UpperCamelCase , '' )
else:
_snake_case : Optional[Any] = end_token.group()
_snake_case : Tuple = re.escape(UpperCamelCase )
_snake_case : Any = re.escape(UpperCamelCase )
_snake_case : Union[str, Any] = re.search(f"""{start_token_escaped}(.*?){end_token_escaped}""" , UpperCamelCase , re.IGNORECASE )
if content is not None:
_snake_case : Union[str, Any] = content.group(1 ).strip()
if r"<s_" in content and r"</s_" in content: # non-leaf node
_snake_case : Optional[int] = self.tokenajson(UpperCamelCase , is_inner_value=UpperCamelCase , added_vocab=UpperCamelCase )
if value:
if len(UpperCamelCase ) == 1:
_snake_case : List[Any] = value[0]
_snake_case : Optional[Any] = value
else: # leaf nodes
_snake_case : int = []
for leaf in content.split(R'<sep/>' ):
_snake_case : Tuple = leaf.strip()
if leaf in added_vocab and leaf[0] == "<" and leaf[-2:] == "/>":
_snake_case : List[Any] = leaf[1:-2] # for categorical special tokens
output[key].append(UpperCamelCase )
if len(output[key] ) == 1:
_snake_case : Optional[Any] = output[key][0]
_snake_case : str = tokens[tokens.find(UpperCamelCase ) + len(UpperCamelCase ) :].strip()
if tokens[:6] == r"<sep/>": # non-leaf nodes
return [output] + self.tokenajson(tokens[6:] , is_inner_value=UpperCamelCase , added_vocab=UpperCamelCase )
if len(UpperCamelCase ):
return [output] if is_inner_value else output
else:
return [] if is_inner_value else {"text_sequence": tokens}
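# Added worked example (illustrative): tokenajson turns Donut-style markup back into
# JSON, e.g. '<s_menu><s_name>burger</s_name></s_menu>' -> {'menu': {'name': 'burger'}},
# and a '<sep/>'-separated leaf such as '<s_item>a<sep/>b</s_item>' -> {'item': ['a', 'b']}.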
@property
def UpperCamelCase_ ( self : str ):
'''simple docstring'''
warnings.warn(
'`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.' , UpperCamelCase , )
return self.image_processor_class
@property
def UpperCamelCase_ ( self : Any ):
'''simple docstring'''
warnings.warn(
'`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.' , UpperCamelCase , )
return self.image_processor
| 669 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_realm import RealmTokenizer
lowerCAmelCase_ = logging.get_logger(__name__)
lowerCAmelCase_ = {"""vocab_file""": """vocab.txt""", """tokenizer_file""": """tokenizer.json"""}
lowerCAmelCase_ = {
"""vocab_file""": {
"""google/realm-cc-news-pretrained-embedder""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/vocab.txt"""
),
"""google/realm-cc-news-pretrained-encoder""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/vocab.txt"""
),
"""google/realm-cc-news-pretrained-scorer""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/vocab.txt"""
),
"""google/realm-cc-news-pretrained-openqa""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-openqa/aresolve/main/vocab.txt"""
),
"""google/realm-orqa-nq-openqa""": """https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/vocab.txt""",
"""google/realm-orqa-nq-reader""": """https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/vocab.txt""",
"""google/realm-orqa-wq-openqa""": """https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/vocab.txt""",
"""google/realm-orqa-wq-reader""": """https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/vocab.txt""",
},
"""tokenizer_file""": {
"""google/realm-cc-news-pretrained-embedder""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/tokenizer.jsont"""
),
"""google/realm-cc-news-pretrained-encoder""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/tokenizer.json"""
),
"""google/realm-cc-news-pretrained-scorer""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/tokenizer.json"""
),
"""google/realm-cc-news-pretrained-openqa""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-openqa/aresolve/main/tokenizer.json"""
),
"""google/realm-orqa-nq-openqa""": (
"""https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/tokenizer.json"""
),
"""google/realm-orqa-nq-reader""": (
"""https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/tokenizer.json"""
),
"""google/realm-orqa-wq-openqa""": (
"""https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/tokenizer.json"""
),
"""google/realm-orqa-wq-reader""": (
"""https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/tokenizer.json"""
),
},
}
lowerCAmelCase_ = {
"""google/realm-cc-news-pretrained-embedder""": 512,
"""google/realm-cc-news-pretrained-encoder""": 512,
"""google/realm-cc-news-pretrained-scorer""": 512,
"""google/realm-cc-news-pretrained-openqa""": 512,
"""google/realm-orqa-nq-openqa""": 512,
"""google/realm-orqa-nq-reader""": 512,
"""google/realm-orqa-wq-openqa""": 512,
"""google/realm-orqa-wq-reader""": 512,
}
lowerCAmelCase_ = {
"""google/realm-cc-news-pretrained-embedder""": {"""do_lower_case""": True},
"""google/realm-cc-news-pretrained-encoder""": {"""do_lower_case""": True},
"""google/realm-cc-news-pretrained-scorer""": {"""do_lower_case""": True},
"""google/realm-cc-news-pretrained-openqa""": {"""do_lower_case""": True},
"""google/realm-orqa-nq-openqa""": {"""do_lower_case""": True},
"""google/realm-orqa-nq-reader""": {"""do_lower_case""": True},
"""google/realm-orqa-wq-openqa""": {"""do_lower_case""": True},
"""google/realm-orqa-wq-reader""": {"""do_lower_case""": True},
}
class _lowerCAmelCase ( UpperCAmelCase_ ):
'''simple docstring'''
a_ : Union[str, Any] =VOCAB_FILES_NAMES
a_ : List[str] =PRETRAINED_VOCAB_FILES_MAP
a_ : str =PRETRAINED_INIT_CONFIGURATION
a_ : Optional[Any] =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a_ : List[Any] =RealmTokenizer
def __init__( self : List[str] , UpperCamelCase : Optional[int]=None , UpperCamelCase : List[Any]=None , UpperCamelCase : Optional[Any]=True , UpperCamelCase : Optional[Any]="[UNK]" , UpperCamelCase : Any="[SEP]" , UpperCamelCase : Optional[Any]="[PAD]" , UpperCamelCase : Optional[int]="[CLS]" , UpperCamelCase : Optional[Any]="[MASK]" , UpperCamelCase : Dict=True , UpperCamelCase : Optional[int]=None , **UpperCamelCase : Union[str, Any] , ):
'''simple docstring'''
super().__init__(
UpperCamelCase , tokenizer_file=UpperCamelCase , do_lower_case=UpperCamelCase , unk_token=UpperCamelCase , sep_token=UpperCamelCase , pad_token=UpperCamelCase , cls_token=UpperCamelCase , mask_token=UpperCamelCase , tokenize_chinese_chars=UpperCamelCase , strip_accents=UpperCamelCase , **UpperCamelCase , )
_snake_case : int = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get('lowercase' , UpperCamelCase ) != do_lower_case
or normalizer_state.get('strip_accents' , UpperCamelCase ) != strip_accents
or normalizer_state.get('handle_chinese_chars' , UpperCamelCase ) != tokenize_chinese_chars
):
_snake_case : int = getattr(UpperCamelCase , normalizer_state.pop('type' ) )
_snake_case : List[str] = do_lower_case
_snake_case : List[Any] = strip_accents
_snake_case : Dict = tokenize_chinese_chars
_snake_case : Any = normalizer_class(**UpperCamelCase )
_snake_case : Optional[int] = do_lower_case
def UpperCamelCase_ ( self : Dict , UpperCamelCase : Any , **UpperCamelCase : Union[str, Any] ):
'''simple docstring'''
_snake_case : Tuple = PaddingStrategy.MAX_LENGTH
_snake_case : Any = text
_snake_case : List[str] = kwargs.pop('text_pair' , UpperCamelCase )
_snake_case : int = kwargs.pop('return_tensors' , UpperCamelCase )
_snake_case : Optional[int] = {
'input_ids': [],
'attention_mask': [],
'token_type_ids': [],
}
for idx, candidate_text in enumerate(UpperCamelCase ):
if batch_text_pair is not None:
_snake_case : List[Any] = batch_text_pair[idx]
else:
_snake_case : Optional[Any] = None
_snake_case : Optional[int] = super().__call__(UpperCamelCase , UpperCamelCase , return_tensors=UpperCamelCase , **UpperCamelCase )
_snake_case : str = encoded_candidates.get('input_ids' )
_snake_case : Tuple = encoded_candidates.get('attention_mask' )
_snake_case : List[str] = encoded_candidates.get('token_type_ids' )
if encoded_input_ids is not None:
output_data["input_ids"].append(UpperCamelCase )
if encoded_attention_mask is not None:
output_data["attention_mask"].append(UpperCamelCase )
if encoded_token_type_ids is not None:
output_data["token_type_ids"].append(UpperCamelCase )
_snake_case : str = {key: item for key, item in output_data.items() if len(UpperCamelCase ) != 0}
return BatchEncoding(UpperCamelCase , tensor_type=UpperCamelCase )
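# Added usage note (illustrative): this __call__ pads every candidate to max_length and
# stacks the per-candidate encodings, so a batch of examples with num_candidates texts
# each comes back as tensors of shape (batch_size, num_candidates, max_length), which is
# what REALM's candidate scorer expects.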
def UpperCamelCase_ ( self : str , UpperCamelCase : Optional[Any] , UpperCamelCase : Union[str, Any]=None ):
'''simple docstring'''
_snake_case : Dict = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def UpperCamelCase_ ( self : Union[str, Any] , UpperCamelCase : List[int] , UpperCamelCase : Optional[List[int]] = None ):
'''simple docstring'''
_snake_case : int = [self.sep_token_id]
_snake_case : Dict = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def UpperCamelCase_ ( self : Union[str, Any] , UpperCamelCase : str , UpperCamelCase : Optional[str] = None ):
'''simple docstring'''
_snake_case : Optional[Any] = self._tokenizer.model.save(UpperCamelCase , name=UpperCamelCase )
return tuple(UpperCamelCase )
| 669 | 1 |
def harmonic_series( lowerCAmelCase: str )-> list:
if lowerCAmelCase == "":
return []
series : list = []
for temp in range(int(lowerCAmelCase ) ):
series.append(F"""1/{temp + 1}""" if series else '1' )
return series
if __name__ == "__main__":
nth_term = input("""Enter the last number (nth term) of the Harmonic Series""")
print("""Formula of Harmonic Series => 1+1/2+1/3 ..... 1/n""")
print(harmonic_series(nth_term))
| 669 |
import argparse
import os
import transformers
from .convert_slow_tokenizer import SLOW_TO_FAST_CONVERTERS
from .utils import logging
logging.set_verbosity_info()
lowerCAmelCase_ = logging.get_logger(__name__)
lowerCAmelCase_ = {name: getattr(transformers, name + """Fast""") for name in SLOW_TO_FAST_CONVERTERS}
def lowerCamelCase_ ( lowerCAmelCase: List[Any] , lowerCAmelCase: Optional[Any] , lowerCAmelCase: Dict , lowerCAmelCase: Union[str, Any] )-> Optional[int]:
if tokenizer_name is not None and tokenizer_name not in TOKENIZER_CLASSES:
raise ValueError(F"""Unrecognized tokenizer name, should be one of {list(TOKENIZER_CLASSES.keys() )}.""" )
if tokenizer_name is None:
_snake_case : Tuple = TOKENIZER_CLASSES
else:
_snake_case : Union[str, Any] = {tokenizer_name: getattr(lowerCAmelCase , tokenizer_name + 'Fast' )}
logger.info(F"""Loading tokenizer classes: {tokenizer_names}""" )
for tokenizer_name in tokenizer_names:
_snake_case : Dict = TOKENIZER_CLASSES[tokenizer_name]
_snake_case : Optional[Any] = True
if checkpoint_name is None:
_snake_case : Union[str, Any] = list(tokenizer_class.max_model_input_sizes.keys() )
else:
_snake_case : Optional[int] = [checkpoint_name]
logger.info(F"""For tokenizer {tokenizer_class.__class__.__name__} loading checkpoints: {checkpoint_names}""" )
for checkpoint in checkpoint_names:
logger.info(F"""Loading {tokenizer_class.__class__.__name__} {checkpoint}""" )
# Load tokenizer
_snake_case : str = tokenizer_class.from_pretrained(lowerCAmelCase , force_download=lowerCAmelCase )
# Save fast tokenizer
logger.info(F"""Save fast tokenizer to {dump_path} with prefix {checkpoint} add_prefix {add_prefix}""" )
# For organization names we create sub-directories
if "/" in checkpoint:
_snake_case , _snake_case : Tuple = checkpoint.split('/' )
_snake_case : int = os.path.join(lowerCAmelCase , lowerCAmelCase )
elif add_prefix:
_snake_case : Dict = checkpoint
_snake_case : Optional[Any] = dump_path
else:
_snake_case : str = None
_snake_case : Union[str, Any] = dump_path
logger.info(F"""=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}""" )
if checkpoint in list(tokenizer.pretrained_vocab_files_map.values() )[0]:
_snake_case : Optional[Any] = list(tokenizer.pretrained_vocab_files_map.values() )[0][checkpoint]
_snake_case : Optional[int] = file_path.split(lowerCAmelCase )[-1][0]
if next_char == "/":
_snake_case : Union[str, Any] = os.path.join(lowerCAmelCase , lowerCAmelCase )
_snake_case : str = None
logger.info(F"""=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}""" )
_snake_case : Optional[int] = tokenizer.save_pretrained(
lowerCAmelCase , legacy_format=lowerCAmelCase , filename_prefix=lowerCAmelCase )
logger.info(F"""=> File names {file_names}""" )
for file_name in file_names:
if not file_name.endswith('tokenizer.json' ):
os.remove(lowerCAmelCase )
logger.info(F"""=> removing {file_name}""" )
if __name__ == "__main__":
lowerCAmelCase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--dump_path""", default=None, type=str, required=True, help="""Path to output generated fast tokenizer files."""
)
parser.add_argument(
"""--tokenizer_name""",
default=None,
type=str,
help=(
F"""Optional tokenizer type selected in the list of {list(TOKENIZER_CLASSES.keys())}. If not given, will """
"""download and convert all the checkpoints from AWS."""
),
)
parser.add_argument(
"""--checkpoint_name""",
default=None,
type=str,
help="""Optional checkpoint name. If not given, will download and convert the canonical checkpoints from AWS.""",
)
parser.add_argument(
"""--force_download""",
action="""store_true""",
help="""Re-download checkpoints.""",
)
    args = parser.parse_args()
convert_slow_checkpoint_to_fast(args.tokenizer_name, args.checkpoint_name, args.dump_path, args.force_download)
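    # Example invocation (hypothetical values, for illustration only; the flags match the argparse
    # definitions above):
    #   python convert_slow_tokenizers_checkpoints_to_fast.py \
    #       --tokenizer_name BertTokenizer \
    #       --checkpoint_name bert-base-uncased \
    #       --dump_path ./fast_tokenizers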
| 669 | 1 |
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class AltCLIPProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "CLIPImageProcessor"
    tokenizer_class = ("XLMRobertaTokenizer", "XLMRobertaTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
                ' instead.', FutureWarning, )
            feature_extractor = kwargs.pop('feature_extractor')

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError('You need to specify an `image_processor`.')
        if tokenizer is None:
            raise ValueError('You need to specify a `tokenizer`.')

        super().__init__(image_processor, tokenizer)

    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        if text is None and images is None:
            raise ValueError('You have to specify either text or images. Both cannot be none.')

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding['pixel_values'] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
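# Usage sketch (illustrative; the checkpoint id and image path are assumptions, not taken from this file):
#   from PIL import Image
#   processor = AltCLIPProcessor.from_pretrained("BAAI/AltCLIP")
#   inputs = processor(text=["a photo of a cat"], images=Image.open("cat.png"), return_tensors="pt")
#   list(inputs.keys())  # -> ['input_ids', 'attention_mask', 'pixel_values']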
| 669 |
def base16_encode(data: bytes) -> str:
    """
    Encodes the given bytes into base16 (uppercase hexadecimal).

    >>> base16_encode(b'Hello World!')
    '48656C6C6F20576F726C6421'
    """
    # Turn each byte into its two-digit, uppercase hexadecimal representation.
    return "".join([hex(byte)[2:].zfill(2).upper() for byte in data])


def base16_decode(data: str) -> bytes:
    """
    Decodes the given base16-encoded string back into bytes.

    >>> base16_decode('48656C6C6F20576F726C6421')
    b'Hello World!'
    """
    # Check data validity, following RFC3548
    # https://www.ietf.org/rfc/rfc3548.txt
    if (len(data) % 2) != 0:
        raise ValueError(
            'Base16 encoded data is invalid:\nData does not have an even number of hex digits.')
    # Check the character set - the standard base16 alphabet
    # is uppercase according to RFC3548 section 6
    if not set(data) <= set('0123456789ABCDEF'):
        raise ValueError(
            'Base16 encoded data is invalid:\nData is not uppercase hex or it contains invalid characters.')
    # For every two hexadecimal digits (= a byte), turn it into an integer.
    # Then, string the result together into bytes, and return it.
    return bytes(int(data[i] + data[i + 1], 16) for i in range(0, len(data), 2))
if __name__ == "__main__":
import doctest
doctest.testmod()
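    # Round-trip sanity check (illustrative):
    assert base16_decode(base16_encode(b'\x00\xff')) == b'\x00\xff'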
| 669 | 1 |
import logging
import os
import random
import sys
from dataclasses import dataclass, field
from typing import Optional
import datasets
import numpy as np
import pandas as pd
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
BartForSequenceClassification,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
TapexTokenizer,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version
from transformers.utils.versions import require_version
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("""4.17.0.dev0""")
require_version("""datasets>=1.8.0""", """To fix: pip install -r examples/pytorch/text-classification/requirements.txt""")
logger = logging.getLogger(__name__)
@dataclass
class DataTrainingArguments:
    dataset_name: Optional[str] = field(
        default="""tab_fact""", metadata={"""help""": """The name of the dataset to use (via the datasets library)."""} )
    dataset_config_name: Optional[str] = field(
        default="""tab_fact""", metadata={"""help""": """The configuration name of the dataset to use (via the datasets library)."""}, )
    max_seq_length: int = field(
        default=1024, metadata={
            """help""": (
                """The maximum total input sequence length after tokenization. Sequences longer """
                """than this will be truncated, sequences shorter will be padded."""
            )
        }, )
    overwrite_cache: bool = field(
        default=False, metadata={"""help""": """Overwrite the cached preprocessed datasets or not."""} )
    pad_to_max_length: bool = field(
        default=False, metadata={
            """help""": (
                """Whether to pad all samples to `max_seq_length`. """
                """If False, will pad the samples dynamically when batching to the maximum length in the batch."""
            )
        }, )
    max_train_samples: Optional[int] = field(
        default=None, metadata={
            """help""": (
                """For debugging purposes or quicker training, truncate the number of training examples to this """
                """value if set."""
            )
        }, )
    max_eval_samples: Optional[int] = field(
        default=None, metadata={
            """help""": (
                """For debugging purposes or quicker training, truncate the number of evaluation examples to this """
                """value if set."""
            )
        }, )
    max_predict_samples: Optional[int] = field(
        default=None, metadata={
            """help""": (
                """For debugging purposes or quicker training, truncate the number of prediction examples to this """
                """value if set."""
            )
        }, )
    train_file: Optional[str] = field(
        default=None, metadata={"""help""": """A csv or a json file containing the training data."""} )
    validation_file: Optional[str] = field(
        default=None, metadata={"""help""": """A csv or a json file containing the validation data."""} )
    test_file: Optional[str] = field(default=None, metadata={"""help""": """A csv or a json file containing the test data."""})

    def __post_init__(self):
        if self.dataset_name is not None:
            pass
        elif self.train_file is None or self.validation_file is None:
            raise ValueError('Need either a GLUE task, a training/validation file or a dataset name.')
        else:
            train_extension = self.train_file.split('.')[-1]
            assert train_extension in ["csv", "json"], "`train_file` should be a csv or a json file."
            validation_extension = self.validation_file.split('.')[-1]
            assert (
                validation_extension == train_extension
            ), "`validation_file` should have the same extension (csv or json) as `train_file`."
@dataclass
class ModelArguments:
    model_name_or_path: str = field(
        default=None, metadata={"""help""": """Path to pretrained model or model identifier from huggingface.co/models"""} )
    config_name: Optional[str] = field(
        default=None, metadata={"""help""": """Pretrained config name or path if not the same as model_name"""} )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"""help""": """Pretrained tokenizer name or path if not the same as model_name"""} )
    cache_dir: Optional[str] = field(
        default=None, metadata={"""help""": """Where do you want to store the pretrained models downloaded from huggingface.co"""}, )
    use_fast_tokenizer: bool = field(
        default=True, metadata={"""help""": """Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."""}, )
    model_revision: str = field(
        default="""main""", metadata={"""help""": """The specific model version to use (can be a branch name, tag name or commit id)."""}, )
    use_auth_token: bool = field(
        default=False, metadata={
            """help""": (
                """Will use the token generated when running `huggingface-cli login` (necessary to use this script """
                """with private models)."""
            )
        }, )


def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith('.json'):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , handlers=[logging.StreamHandler(sys.stdout )] , )
    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    datasets.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()
    # Log on each process the small summary:
    logger.warning(
        F"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"""
        + F"""distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fp16}""")
logger.info(F"""Training/evaluation parameters {training_args}""" )
# Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F"""Output directory ({training_args.output_dir}) already exists and is not empty. """
'Use --overwrite_output_dir to overcome.' )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
F"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """
'the `--output_dir` or add `--overwrite_output_dir` to train from scratch.' )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON training and evaluation files (see below)
# or specify a GLUE benchmark task (the dataset will be downloaded automatically from the datasets Hub).
#
# For JSON files, this script will use the `question` column for the input question and `table` column for the corresponding table.
#
# If the CSVs/JSONs contain only one non-label column, the script does single sentence classification on this
# single column. You can easily tweak this behavior (see below)
#
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
    if data_args.dataset_name is not None:
        # Downloading and loading a dataset from the hub.
        raw_datasets = load_dataset(
            data_args.dataset_name, data_args.dataset_config_name, cache_dir=model_args.cache_dir)
    else:
        # Loading a dataset from your local files.
        # CSV/JSON training and evaluation files are needed.
        data_files = {'train': data_args.train_file, 'validation': data_args.validation_file}

        # Get the test dataset: you can provide your own CSV/JSON test file (see below)
        # when you use `do_predict` without specifying a GLUE benchmark task.
        if training_args.do_predict:
            if data_args.test_file is not None:
                train_extension = data_args.train_file.split('.')[-1]
                test_extension = data_args.test_file.split('.')[-1]
                assert (
                    test_extension == train_extension
                ), "`test_file` should have the same extension (csv or json) as `train_file`."
                data_files['test'] = data_args.test_file
            else:
                raise ValueError('Need either a GLUE task or a test file for `do_predict`.')

        for key in data_files.keys():
            logger.info(F"""load a local file for {key}: {data_files[key]}""")

        if data_args.train_file.endswith('.csv'):
            # Loading a dataset from local csv files
            raw_datasets = load_dataset('csv', data_files=data_files, cache_dir=model_args.cache_dir)
        else:
            # Loading a dataset from local json files
            raw_datasets = load_dataset('json', data_files=data_files, cache_dir=model_args.cache_dir)
    # See more about loading any type of standard or custom dataset at
    # https://huggingface.co/docs/datasets/loading_datasets.html.

    # Labels
    label_list = raw_datasets['train'].features['label'].names
    num_labels = len(label_list)
# Load pretrained model and tokenizer
#
# In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path, num_labels=num_labels, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, )
    # load tapex tokenizer
    tokenizer = TapexTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path, cache_dir=model_args.cache_dir, use_fast=model_args.use_fast_tokenizer, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, add_prefix_space=True, )
    model = BartForSequenceClassification.from_pretrained(
        model_args.model_name_or_path, from_tf=bool('.ckpt' in model_args.model_name_or_path), config=config, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, )

    # Padding strategy
    if data_args.pad_to_max_length:
        padding = 'max_length'
    else:
        # We will pad later, dynamically at batch creation, to the max sequence length in each batch
        padding = False

    # Some models have set the order of the labels to use, so let's make sure we do use it.
    model.config.label2id = {'Refused': 0, 'Entailed': 1}
    model.config.id2label = {0: 'Refused', 1: 'Entailed'}
    if data_args.max_seq_length > tokenizer.model_max_length:
        logger.warning(
            F"""The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the"""
            F"""model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.""")
    max_seq_length = min(data_args.max_seq_length, tokenizer.model_max_length)
    def preprocess_tabfact_function(examples):
        # Tokenize the texts
        def _convert_table_text_to_pandas(_table_text):
            _table_content = [_table_row.split('#') for _table_row in _table_text.strip('\n').split('\n')]
            _table_pd = pd.DataFrame.from_records(_table_content[1:], columns=_table_content[0])
            return _table_pd

        questions = examples['statement']
        tables = list(map(_convert_table_text_to_pandas, examples['table_text']))
        result = tokenizer(tables, questions, padding=padding, max_length=max_seq_length, truncation=True)
        result['label'] = examples['label']
        return result

    with training_args.main_process_first(desc='dataset map pre-processing'):
        raw_datasets = raw_datasets.map(
            preprocess_tabfact_function, batched=True, load_from_cache_file=not data_args.overwrite_cache, desc='Running tokenizer on dataset', )
    if training_args.do_train:
        if "train" not in raw_datasets:
            raise ValueError('--do_train requires a train dataset')
        train_dataset = raw_datasets['train']
        if data_args.max_train_samples is not None:
            train_dataset = train_dataset.select(range(data_args.max_train_samples))

    if training_args.do_eval:
        if "validation" not in raw_datasets and "validation_matched" not in raw_datasets:
            raise ValueError('--do_eval requires a validation dataset')
        eval_dataset = raw_datasets['validation']
        if data_args.max_eval_samples is not None:
            eval_dataset = eval_dataset.select(range(data_args.max_eval_samples))

    if training_args.do_predict or data_args.test_file is not None:
        if "test" not in raw_datasets and "test_matched" not in raw_datasets:
            raise ValueError('--do_predict requires a test dataset')
        predict_dataset = raw_datasets['test']
        if data_args.max_predict_samples is not None:
            predict_dataset = predict_dataset.select(range(data_args.max_predict_samples))

    # Log a few random samples from the training set:
    if training_args.do_train:
        for index in random.sample(range(len(train_dataset)), 3):
            logger.info(F"""Sample {index} of the training set: {train_dataset[index]}.""")

    # You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
    # predictions and label_ids field) and has to return a dictionary string to float.
    def compute_metrics(p: EvalPrediction):
        preds = p.predictions[0] if isinstance(p.predictions, tuple) else p.predictions
        preds = np.argmax(preds, axis=1)
        return {"accuracy": (preds == p.label_ids).astype(np.float64).mean().item()}
    # Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding.
    if data_args.pad_to_max_length:
        data_collator = default_data_collator
    elif training_args.fp16:
        data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8)
    else:
        data_collator = None

    # Initialize our Trainer
    trainer = Trainer(
        model=model, args=training_args, train_dataset=train_dataset if training_args.do_train else None, eval_dataset=eval_dataset if training_args.do_eval else None, compute_metrics=compute_metrics, tokenizer=tokenizer, data_collator=data_collator, )
    # Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        metrics = train_result.metrics
        max_train_samples = (
            data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset)
        )
        metrics['train_samples'] = min(max_train_samples, len(train_dataset))

        trainer.save_model()  # Saves the tokenizer too for easy upload

        trainer.log_metrics('train', metrics)
        trainer.save_metrics('train', metrics)
        trainer.save_state()

    # Evaluation
    if training_args.do_eval:
        logger.info('*** Evaluate ***')

        metrics = trainer.evaluate(eval_dataset=eval_dataset)
        max_eval_samples = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(eval_dataset)
        metrics['eval_samples'] = min(max_eval_samples, len(eval_dataset))

        trainer.log_metrics('eval', metrics)
        trainer.save_metrics('eval', metrics)
    if training_args.do_predict:
        logger.info('*** Predict ***')

        # Removing the `label` columns because it contains -1 and Trainer won't like that.
        predict_dataset = predict_dataset.remove_columns('label')
        predictions = trainer.predict(predict_dataset, metric_key_prefix='predict').predictions
        predictions = np.argmax(predictions, axis=1)

        output_predict_file = os.path.join(training_args.output_dir, 'predict_results_tabfact.txt')
        if trainer.is_world_process_zero():
            with open(output_predict_file, 'w') as writer:
                logger.info('***** Predict Results *****')
                writer.write('index\tprediction\n')
                for index, item in enumerate(predictions):
                    item = label_list[item]
                    writer.write(F"""{index}\t{item}\n""")

    kwargs = {'finetuned_from': model_args.model_name_or_path, 'tasks': 'text-classification'}
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs)
    else:
        trainer.create_model_card(**kwargs)


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
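    # Example launch (hypothetical script name and values, mirroring the argument names defined above):
    #   python run_tabfact_with_tapex.py \
    #       --model_name_or_path microsoft/tapex-base \
    #       --dataset_name tab_fact \
    #       --do_train --do_eval \
    #       --output_dir ./tapex-tabfact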
| 669 |
import csv
import tweepy
# Twitter API credentials
lowerCAmelCase_ = """"""
lowerCAmelCase_ = """"""
lowerCAmelCase_ = """"""
lowerCAmelCase_ = """"""
def lowerCamelCase_ ( lowerCAmelCase: str )-> None:
# authorize twitter, initialize tweepy
_snake_case : Optional[Any] = tweepy.OAuthHandler(lowerCAmelCase , lowerCAmelCase )
auth.set_access_token(lowerCAmelCase , lowerCAmelCase )
_snake_case : List[Any] = tweepy.API(lowerCAmelCase )
# initialize a list to hold all the tweepy Tweets
_snake_case : Any = []
# make initial request for most recent tweets (200 is the maximum allowed count)
_snake_case : List[str] = api.user_timeline(screen_name=lowerCAmelCase , count=2_00 )
# save most recent tweets
alltweets.extend(lowerCAmelCase )
# save the id of the oldest tweet less one
_snake_case : List[Any] = alltweets[-1].id - 1
# keep grabbing tweets until there are no tweets left to grab
while len(lowerCAmelCase ) > 0:
print(F"""getting tweets before {oldest}""" )
# all subsequent requests use the max_id param to prevent duplicates
_snake_case : Tuple = api.user_timeline(
screen_name=lowerCAmelCase , count=2_00 , max_id=lowerCAmelCase )
# save most recent tweets
alltweets.extend(lowerCAmelCase )
# update the id of the oldest tweet less one
_snake_case : List[str] = alltweets[-1].id - 1
print(F"""...{len(lowerCAmelCase )} tweets downloaded so far""" )
# transform the tweepy tweets into a 2D array that will populate the csv
_snake_case : int = [[tweet.id_str, tweet.created_at, tweet.text] for tweet in alltweets]
# write the csv
with open(F"""new_{screen_name}_tweets.csv""" , 'w' ) as f:
_snake_case : Any = csv.writer(lowerCAmelCase )
writer.writerow(['id', 'created_at', 'text'] )
writer.writerows(lowerCAmelCase )
if __name__ == "__main__":
# pass in the username of the account you want to download
get_all_tweets("""FirePing32""")
| 669 | 1 |
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import sys
import warnings
from os.path import abspath, dirname, join
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
git_repo_path = abspath(join(dirname(dirname(dirname(__file__))), """src"""))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action="""ignore""", category=FutureWarning)
def pytest_addoption(parser):
    from transformers.testing_utils import pytest_addoption_shared

    pytest_addoption_shared(parser)


def pytest_terminal_summary(terminalreporter):
    from transformers.testing_utils import pytest_terminal_summary_main

    make_reports = terminalreporter.config.getoption('--make-reports')
    if make_reports:
        pytest_terminal_summary_main(terminalreporter, id=make_reports)
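# Example (illustrative): running `pytest --make-reports=my_run tests/` triggers the summary hook
# above and writes per-run report files via pytest_terminal_summary_main.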
| 669 |
import copy
from dataclasses import dataclass
from pathlib import Path
from typing import Dict, Optional, Union
@dataclass
class DownloadConfig:
    cache_dir: Optional[Union[str, Path]] = None
    force_download: bool = False
    resume_download: bool = False
    local_files_only: bool = False
    proxies: Optional[Dict] = None
    user_agent: Optional[str] = None
    extract_compressed_file: bool = False
    force_extract: bool = False
    delete_extracted: bool = False
    use_etag: bool = True
    num_proc: Optional[int] = None
    max_retries: int = 1
    use_auth_token: Optional[Union[str, bool]] = None
    ignore_url_params: bool = False
    storage_options: Optional[Dict] = None
    download_desc: Optional[str] = None

    def copy(self) -> "DownloadConfig":
        return self.__class__(**{k: copy.deepcopy(v) for k, v in self.__dict__.items()})
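# Minimal usage sketch (assuming this is the datasets-style DownloadConfig; field names above):
#   config = DownloadConfig(cache_dir='~/.cache', max_retries=3)
#   config_copy = config.copy()  # deep copy: mutating one instance does not affect the other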
| 669 | 1 |
import inspect
import os
import sys
import unittest
import accelerate
from accelerate.test_utils import execute_subprocess_async, require_tpu
class MultiTPUTester(unittest.TestCase):
    def setUp(self):
        mod_file = inspect.getfile(accelerate.test_utils)
        self.test_file_path = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ['scripts', 'test_script.py'])
        self.test_dir = os.path.sep.join(inspect.getfile(self.__class__).split(os.path.sep)[:-1])

    @require_tpu
    def test_tpu(self):
        distributed_args = f"""
        {self.test_dir}/xla_spawn.py
        --num_cores 8
        {self.test_file_path}
        """.split()
        cmd = [sys.executable] + distributed_args
        execute_subprocess_async(cmd, env=os.environ.copy())
| 669 |
import unittest
from pathlib import Path
from tempfile import TemporaryDirectory
from transformers import AutoConfig, TFGPT2LMHeadModel, is_keras_nlp_available, is_tf_available
from transformers.models.gpt2.tokenization_gpt2 import GPT2Tokenizer
from transformers.testing_utils import require_keras_nlp, require_tf, slow


if is_tf_available():
    import tensorflow as tf

if is_keras_nlp_available():
    from transformers.models.gpt2 import TFGPT2Tokenizer


TOKENIZER_CHECKPOINTS = ["gpt2"]
TINY_MODEL_CHECKPOINT = "gpt2"
if is_tf_available():
    class ModelToSave(tf.Module):
        def __init__(self, tokenizer):
            super().__init__()
            self.tokenizer = tokenizer
            config = AutoConfig.from_pretrained(TINY_MODEL_CHECKPOINT)
            self.model = TFGPT2LMHeadModel.from_config(config)

        @tf.function(input_signature=(tf.TensorSpec((None,), tf.string, name='text'),))
        def serving(self, text):
            tokenized = self.tokenizer(text)
            input_ids_dense = tokenized['input_ids'].to_tensor()
            input_mask = tf.cast(input_ids_dense > 0, tf.int32)
            # input_mask = tf.reshape(input_mask, [-1, MAX_SEQ_LEN])
            outputs = self.model(input_ids=input_ids_dense, attention_mask=input_mask)['logits']
            return outputs
@require_tf
@require_keras_nlp
class GPT2TokenizationTest(unittest.TestCase):
    def setUp(self):
        super().setUp()

        self.tokenizers = [GPT2Tokenizer.from_pretrained(checkpoint) for checkpoint in (TOKENIZER_CHECKPOINTS)]
        self.tf_tokenizers = [TFGPT2Tokenizer.from_pretrained(checkpoint) for checkpoint in TOKENIZER_CHECKPOINTS]
        assert len(self.tokenizers) == len(self.tf_tokenizers)

        self.test_sentences = [
            'This is a straightforward English test sentence.',
            'This one has some weird characters\rto\nsee\r\nif those\u00E9break things.',
            'Now we\'re going to add some Chinese: 一 二 三 一二三',
            'And some much more rare Chinese: 齉 堃 齉堃',
            'Je vais aussi écrire en français pour tester les accents',
            'Classical Irish also has some unusual characters, so in they go: Gaelaċ, ꝼ',
        ]
        self.paired_sentences = list(zip(self.test_sentences, self.test_sentences[::-1]))

    def test_output_equivalence(self):
        for tokenizer, tf_tokenizer in zip(self.tokenizers, self.tf_tokenizers):
            for test_inputs in self.test_sentences:
                python_outputs = tokenizer([test_inputs], return_tensors='tf')
                tf_outputs = tf_tokenizer([test_inputs])

                for key in python_outputs.keys():
                    # convert them to numpy to avoid messing with ragged tensors
                    python_outputs_values = python_outputs[key].numpy()
                    tf_outputs_values = tf_outputs[key].numpy()

                    self.assertTrue(tf.reduce_all(python_outputs_values.shape == tf_outputs_values.shape))
                    self.assertTrue(tf.reduce_all(tf.cast(python_outputs_values, tf.int64) == tf_outputs_values))
    @slow
    def test_graph_mode(self):
        for tf_tokenizer in self.tf_tokenizers:
            compiled_tokenizer = tf.function(tf_tokenizer)
            for test_inputs in self.test_sentences:
                test_inputs = tf.constant(test_inputs)
                compiled_outputs = compiled_tokenizer(test_inputs)
                eager_outputs = tf_tokenizer(test_inputs)

                for key in eager_outputs.keys():
                    self.assertTrue(tf.reduce_all(eager_outputs[key] == compiled_outputs[key]))

    @slow
    def test_saved_model(self):
        for tf_tokenizer in self.tf_tokenizers:
            model = ModelToSave(tokenizer=tf_tokenizer)
            test_inputs = tf.convert_to_tensor([self.test_sentences[0]])
            out = model.serving(test_inputs)  # Build model with some sample inputs
            with TemporaryDirectory() as tempdir:
                save_path = Path(tempdir) / 'saved.model'
                tf.saved_model.save(model, save_path, signatures={'serving_default': model.serving})
                loaded_model = tf.saved_model.load(save_path)
                loaded_output = loaded_model.signatures['serving_default'](test_inputs)['output_0']
                # We may see small differences because the loaded model is compiled, so we need an epsilon for the test
                self.assertTrue(tf.reduce_all(out == loaded_output))

    @slow
    def test_from_config(self):
        for tf_tokenizer in self.tf_tokenizers:
            test_inputs = tf.convert_to_tensor([self.test_sentences[0]])
            out = tf_tokenizer(test_inputs)  # Build model with some sample inputs
            config = tf_tokenizer.get_config()
            model_from_config = TFGPT2Tokenizer.from_config(config)
            from_config_output = model_from_config(test_inputs)

            for key in from_config_output.keys():
                self.assertTrue(tf.reduce_all(from_config_output[key] == out[key]))

    @slow
    def test_padding(self):
        for tf_tokenizer in self.tf_tokenizers:
            # for the test to run
            tf_tokenizer.pad_token_id = 123123

            for max_length in [3, 5, 1024]:
                test_inputs = tf.convert_to_tensor([self.test_sentences[0]])
                out = tf_tokenizer(test_inputs, max_length=max_length)

                out_length = out['input_ids'].numpy().shape[1]

                assert out_length == max_length
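# Illustrative takeaway (checkpoint id assumed): TFGPT2Tokenizer.from_pretrained("gpt2") yields an
# in-graph tokenizer that can be compiled with tf.function and exported inside a tf.saved_model,
# which the slow, pure-Python GPT2Tokenizer cannot do.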
| 669 | 1 |
from __future__ import annotations
import sys
from collections import deque
from typing import Generic, TypeVar
lowerCAmelCase_ = TypeVar("""T""")
class LRUCache(Generic[T]):
    dq_store: deque[T]  # Cache store of keys
    key_reference: set[T]  # References of the keys in cache
    _MAX_CAPACITY: int = 10  # Maximum capacity of cache

    def __init__(self, n: int) -> None:
        # Creates an empty store and map for the keys; the cache size is set to n.
        self.dq_store = deque()
        self.key_reference = set()
        if not n:
            LRUCache._MAX_CAPACITY = sys.maxsize
        elif n < 0:
            raise ValueError('n should be an integer greater than 0.')
        else:
            LRUCache._MAX_CAPACITY = n

    def refer(self, x: T) -> None:
        # Look for the key in the cache; evict the least recently used key if the store is full.
        if x not in self.key_reference:
            if len(self.dq_store) == LRUCache._MAX_CAPACITY:
                last_element = self.dq_store.pop()
                self.key_reference.remove(last_element)
        else:
            self.dq_store.remove(x)

        self.dq_store.appendleft(x)
        self.key_reference.add(x)

    def display(self) -> None:
        # Prints all the elements in the store, most recently used first.
        for k in self.dq_store:
            print(k)

    def __repr__(self) -> str:
        return f"""LRUCache({self._MAX_CAPACITY}) => {list(self.dq_store)}"""
if __name__ == "__main__":
import doctest
doctest.testmod()
lowerCAmelCase_ = LRUCache(4)
lru_cache.refer("""A""")
lru_cache.refer(2)
lru_cache.refer(3)
lru_cache.refer("""A""")
lru_cache.refer(4)
lru_cache.refer(5)
lru_cache.display()
print(lru_cache)
assert str(lru_cache) == "LRUCache(4) => [5, 4, 'A', 3]"
| 669 |
def hamming(n_element: int) -> list:
    n_element = int(n_element)
    if n_element < 1:
        my_error = ValueError('a should be a positive number')
        raise my_error

    hamming_list = [1]
    i, j, k = (0, 0, 0)
    index = 1
while index < n_element:
while hamming_list[i] * 2 <= hamming_list[-1]:
i += 1
while hamming_list[j] * 3 <= hamming_list[-1]:
j += 1
while hamming_list[k] * 5 <= hamming_list[-1]:
k += 1
hamming_list.append(
min(hamming_list[i] * 2 , hamming_list[j] * 3 , hamming_list[k] * 5 ) )
index += 1
return hamming_list
if __name__ == "__main__":
    n = input("""Enter the last number (nth term) of the Hamming Number Series: """)
    print("""Formula of Hamming Number Series => 2^i * 3^j * 5^k""")
    hamming_numbers = hamming(int(n))
print("""-----------------------------------------------------""")
print(F"""The list with nth numbers is: {hamming_numbers}""")
print("""-----------------------------------------------------""")
| 669 | 1 |
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import add_start_docstrings
lowerCAmelCase_ = r"""
[`RagConfig`] stores the configuration of a *RagModel*. Configuration objects inherit from [`PretrainedConfig`] and
can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information.
Args:
title_sep (`str`, *optional*, defaults to `\" / \"`):
Separator inserted between the title and the text of the retrieved document when calling [`RagRetriever`].
doc_sep (`str`, *optional*, defaults to `\" // \"`):
Separator inserted between the text of the retrieved document and the original input when calling
[`RagRetriever`].
n_docs (`int`, *optional*, defaults to 5):
Number of documents to retrieve.
max_combined_length (`int`, *optional*, defaults to 300):
Max length of contextualized input returned by [`~RagRetriever.__call__`].
retrieval_vector_size (`int`, *optional*, defaults to 768):
Dimensionality of the document embeddings indexed by [`RagRetriever`].
retrieval_batch_size (`int`, *optional*, defaults to 8):
Retrieval batch size, defined as the number of queries issues concurrently to the faiss index encapsulated
[`RagRetriever`].
dataset (`str`, *optional*, defaults to `\"wiki_dpr\"`):
A dataset identifier of the indexed dataset in HuggingFace Datasets (list all available datasets and ids
using `datasets.list_datasets()`).
dataset_split (`str`, *optional*, defaults to `\"train\"`)
Which split of the `dataset` to load.
index_name (`str`, *optional*, defaults to `\"compressed\"`)
The index name of the index associated with the `dataset`. One can choose between `\"legacy\"`, `\"exact\"` and
`\"compressed\"`.
index_path (`str`, *optional*)
The path to the serialized faiss index on disk.
passages_path (`str`, *optional*):
A path to text passages compatible with the faiss index. Required if using
[`~models.rag.retrieval_rag.LegacyIndex`]
use_dummy_dataset (`bool`, *optional*, defaults to `False`)
Whether to load a \"dummy\" variant of the dataset specified by `dataset`.
label_smoothing (`float`, *optional*, defaults to 0.0):
Only relevant if `return_loss` is set to `True`. Controls the `epsilon` parameter value for label smoothing
in the loss calculation. If set to 0, no label smoothing is performed.
do_marginalize (`bool`, *optional*, defaults to `False`):
If `True`, the logits are marginalized over all documents by making use of
`torch.nn.functional.log_softmax`.
reduce_loss (`bool`, *optional*, defaults to `False`):
Whether or not to reduce the NLL loss using the `torch.Tensor.sum` operation.
do_deduplication (`bool`, *optional*, defaults to `True`):
Whether or not to deduplicate the generations from different context documents for a given input. Has to be
set to `False` if used while training with distributed backend.
exclude_bos_score (`bool`, *optional*, defaults to `False`):
Whether or not to disregard the BOS token when computing the loss.
output_retrieved(`bool`, *optional*, defaults to `False`):
If set to `True`, `retrieved_doc_embeds`, `retrieved_doc_ids`, `context_input_ids` and
`context_attention_mask` are returned. See returned tensors for more detail.
use_cache (`bool`, *optional*, defaults to `True`):
Whether or not the model should return the last key/values attentions (not used by all models).
forced_eos_token_id (`int`, *optional*):
The id of the token to force as the last generated token when `max_length` is reached. Usually set to
`eos_token_id`.
"""
@add_start_docstrings(RAG_CONFIG_DOC)
class RagConfig(PretrainedConfig):
    model_type = 'rag'
    is_composition = True

    def __init__(self, vocab_size=None, is_encoder_decoder=True, prefix=None, bos_token_id=None, pad_token_id=None, eos_token_id=None, decoder_start_token_id=None, title_sep=' / ', doc_sep=' // ', n_docs=5, max_combined_length=300, retrieval_vector_size=768, retrieval_batch_size=8, dataset='wiki_dpr', dataset_split='train', index_name='compressed', index_path=None, passages_path=None, use_dummy_dataset=False, reduce_loss=False, label_smoothing=0.0, do_deduplication=True, exclude_bos_score=False, do_marginalize=False, output_retrieved=False, use_cache=True, forced_eos_token_id=None, **kwargs, ):
        super().__init__(
            bos_token_id=bos_token_id, pad_token_id=pad_token_id, eos_token_id=eos_token_id, decoder_start_token_id=decoder_start_token_id, forced_eos_token_id=forced_eos_token_id, is_encoder_decoder=is_encoder_decoder, prefix=prefix, vocab_size=vocab_size, **kwargs, )
assert (
"question_encoder" in kwargs and "generator" in kwargs
), "Config has to be initialized with question_encoder and generator config"
        question_encoder_config = kwargs.pop('question_encoder')
        question_encoder_model_type = question_encoder_config.pop('model_type')
        decoder_config = kwargs.pop('generator')
        decoder_model_type = decoder_config.pop('model_type')

        from ..auto.configuration_auto import AutoConfig

        self.question_encoder = AutoConfig.for_model(question_encoder_model_type, **question_encoder_config)
        self.generator = AutoConfig.for_model(decoder_model_type, **decoder_config)

        self.reduce_loss = reduce_loss
        self.label_smoothing = label_smoothing
        self.exclude_bos_score = exclude_bos_score
        self.do_marginalize = do_marginalize
        self.title_sep = title_sep
        self.doc_sep = doc_sep
        self.n_docs = n_docs
        self.max_combined_length = max_combined_length
        self.dataset = dataset
        self.dataset_split = dataset_split
        self.index_name = index_name
        self.retrieval_vector_size = retrieval_vector_size
        self.retrieval_batch_size = retrieval_batch_size
        self.passages_path = passages_path
        self.index_path = index_path
        self.use_dummy_dataset = use_dummy_dataset
        self.output_retrieved = output_retrieved
        self.do_deduplication = do_deduplication
        self.use_cache = use_cache

        if self.forced_eos_token_id is None:
            self.forced_eos_token_id = getattr(self.generator, 'forced_eos_token_id', None)
@classmethod
    def from_question_encoder_generator_configs(
        cls, question_encoder_config: PretrainedConfig, generator_config: PretrainedConfig, **kwargs
    ) -> PretrainedConfig:
        return cls(question_encoder=question_encoder_config.to_dict(), generator=generator_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output['question_encoder'] = self.question_encoder.to_dict()
        output['generator'] = self.generator.to_dict()
        output['model_type'] = self.__class__.model_type
        return output
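# Usage sketch (illustrative; the sub-config checkpoints are assumptions, not taken from this file):
#   from transformers import AutoConfig
#   question_encoder = AutoConfig.from_pretrained("facebook/dpr-question_encoder-single-nq-base")
#   generator = AutoConfig.from_pretrained("facebook/bart-large")
#   rag_config = RagConfig.from_question_encoder_generator_configs(question_encoder, generator, n_docs=5)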
| 669 |
import json
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from transformers import OneFormerImageProcessor
from transformers.models.oneformer.image_processing_oneformer import binary_mask_to_rle
from transformers.models.oneformer.modeling_oneformer import OneFormerForUniversalSegmentationOutput
if is_vision_available():
from PIL import Image
def prepare_metadata(class_info_file, repo_path="shi-labs/oneformer_demo"):
    with open(hf_hub_download(repo_path, class_info_file, repo_type='dataset'), 'r') as f:
        class_info = json.load(f)
    metadata = {}
    class_names = []
    thing_ids = []
    for key, info in class_info.items():
        metadata[key] = info['name']
        class_names.append(info['name'])
        if info["isthing"]:
            thing_ids.append(int(key))
    metadata['thing_ids'] = thing_ids
    metadata['class_names'] = class_names
    return metadata
class OneFormerImageProcessorTester(unittest.TestCase):
    def __init__(self, parent, batch_size=7, num_channels=3, min_resolution=30, max_resolution=400, size=None, do_resize=True, do_normalize=True, image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5], num_labels=10, do_reduce_labels=False, ignore_index=255, repo_path="shi-labs/oneformer_demo", class_info_file="ade20k_panoptic.json", num_text=10, ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = {'shortest_edge': 32, 'longest_edge': 1333} if size is None else size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.class_info_file = class_info_file
        self.metadata = prepare_metadata(class_info_file, repo_path)
        self.num_text = num_text
        self.repo_path = repo_path

        # for the post_process_functions
        self.batch_size = 2
        self.num_queries = 10
        self.num_classes = 10
        self.height = 3
        self.width = 4
        self.num_labels = num_labels
        self.do_reduce_labels = do_reduce_labels
        self.ignore_index = ignore_index
    def prepare_image_processor_dict(self):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"num_labels": self.num_labels,
"do_reduce_labels": self.do_reduce_labels,
"ignore_index": self.ignore_index,
"class_info_file": self.class_info_file,
"metadata": self.metadata,
"num_text": self.num_text,
}
    def get_expected_values(self, image_inputs, batched=False):
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size['shortest_edge'] * h / w)
                expected_width = self.size['shortest_edge']
            elif w > h:
                expected_height = self.size['shortest_edge']
                expected_width = int(self.size['shortest_edge'] * w / h)
            else:
                expected_height = self.size['shortest_edge']
                expected_width = self.size['shortest_edge']
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]

        return expected_height, expected_width
    def get_fake_oneformer_outputs(self):
return OneFormerForUniversalSegmentationOutput(
# +1 for null class
class_queries_logits=torch.randn((self.batch_size, self.num_queries, self.num_classes + 1) ) , masks_queries_logits=torch.randn((self.batch_size, self.num_queries, self.height, self.width) ) , )
@require_torch
@require_vision
class OneFormerImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = OneFormerImageProcessor if (is_vision_available() and is_torch_available()) else None
    # only for test_image_processing_common.test_image_proc_to_json_string
    feature_extraction_class = image_processing_class

    def setUp(self):
        self.image_processing_tester = OneFormerImageProcessorTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processing_tester.prepare_image_processor_dict()

    def test_image_proc_properties(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processor, 'image_mean'))
        self.assertTrue(hasattr(image_processor, 'image_std'))
        self.assertTrue(hasattr(image_processor, 'do_normalize'))
        self.assertTrue(hasattr(image_processor, 'do_resize'))
        self.assertTrue(hasattr(image_processor, 'size'))
        self.assertTrue(hasattr(image_processor, 'ignore_index'))
        self.assertTrue(hasattr(image_processor, 'class_info_file'))
        self.assertTrue(hasattr(image_processor, 'num_text'))
        self.assertTrue(hasattr(image_processor, 'repo_path'))
        self.assertTrue(hasattr(image_processor, 'metadata'))
        self.assertTrue(hasattr(image_processor, 'do_reduce_labels'))

    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processing_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processor(image_inputs[0], ['semantic'], return_tensors='pt').pixel_values

        expected_height, expected_width = self.image_processing_tester.get_expected_values(image_inputs)

        self.assertEqual(
            encoded_images.shape, (1, self.image_processing_tester.num_channels, expected_height, expected_width), )

        # Test batched
        expected_height, expected_width = self.image_processing_tester.get_expected_values(image_inputs, batched=True)

        encoded_images = image_processor(
            image_inputs, ['semantic'] * len(image_inputs), return_tensors='pt').pixel_values

        self.assertEqual(
            encoded_images.shape, (
                self.image_processing_tester.batch_size,
                self.image_processing_tester.num_channels,
                expected_height,
                expected_width,
            ), )

    def test_call_numpy(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processing_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processor(image_inputs[0], ['semantic'], return_tensors='pt').pixel_values

        expected_height, expected_width = self.image_processing_tester.get_expected_values(image_inputs)

        self.assertEqual(
            encoded_images.shape, (1, self.image_processing_tester.num_channels, expected_height, expected_width), )

        # Test batched
        expected_height, expected_width = self.image_processing_tester.get_expected_values(image_inputs, batched=True)

        encoded_images = image_processor(
            image_inputs, ['semantic'] * len(image_inputs), return_tensors='pt').pixel_values

        self.assertEqual(
            encoded_images.shape, (
                self.image_processing_tester.batch_size,
                self.image_processing_tester.num_channels,
                expected_height,
                expected_width,
            ), )

    def test_call_pytorch(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processing_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processor(image_inputs[0], ['semantic'], return_tensors='pt').pixel_values

        expected_height, expected_width = self.image_processing_tester.get_expected_values(image_inputs)

        self.assertEqual(
            encoded_images.shape, (1, self.image_processing_tester.num_channels, expected_height, expected_width), )

        # Test batched
        expected_height, expected_width = self.image_processing_tester.get_expected_values(image_inputs, batched=True)

        encoded_images = image_processor(
            image_inputs, ['semantic'] * len(image_inputs), return_tensors='pt').pixel_values

        self.assertEqual(
            encoded_images.shape, (
                self.image_processing_tester.batch_size,
                self.image_processing_tester.num_channels,
                expected_height,
                expected_width,
            ), )
    def comm_get_image_processor_inputs(
        self, with_segmentation_maps=False, is_instance_map=False, segmentation_type='np'
    ):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # prepare image and target
        num_labels = self.image_processing_tester.num_labels
        annotations = None
        instance_id_to_semantic_id = None
        image_inputs = prepare_image_inputs(self.image_processing_tester, equal_resolution=False)
        if with_segmentation_maps:
            high = num_labels
            if is_instance_map:
                labels_expanded = list(range(num_labels)) * 2
                instance_id_to_semantic_id = dict(enumerate(labels_expanded))
            annotations = [
                np.random.randint(0, high * 2, (img.size[1], img.size[0])).astype(np.uint8) for img in image_inputs
            ]
            if segmentation_type == "pil":
                annotations = [Image.fromarray(annotation) for annotation in annotations]

        inputs = image_processor(
            image_inputs, ['semantic'] * len(image_inputs), annotations, return_tensors='pt', instance_id_to_semantic_id=instance_id_to_semantic_id, pad_and_return_pixel_mask=True, )

        return inputs

    def test_init_without_params(self):
        pass

    def test_call_with_segmentation_maps(self):
        def common(is_instance_map=False, segmentation_type=None):
            inputs = self.comm_get_image_processor_inputs(
                with_segmentation_maps=True, is_instance_map=is_instance_map, segmentation_type=segmentation_type)

            mask_labels = inputs['mask_labels']
            class_labels = inputs['class_labels']
            pixel_values = inputs['pixel_values']
            text_inputs = inputs['text_inputs']

            # check the batch_size
            for mask_label, class_label, text_input in zip(mask_labels, class_labels, text_inputs):
                self.assertEqual(mask_label.shape[0], class_label.shape[0])
                # this ensure padding has happened
                self.assertEqual(mask_label.shape[1:], pixel_values.shape[2:])
                self.assertEqual(len(text_input), self.image_processing_tester.num_text)

        common()
        common(is_instance_map=True)
        common(is_instance_map=False, segmentation_type='pil')
        common(is_instance_map=True, segmentation_type='pil')
    def test_binary_mask_to_rle(self):
        fake_binary_mask = np.zeros((20, 50))
        fake_binary_mask[0, 20:] = 1
        fake_binary_mask[1, :15] = 1
        fake_binary_mask[5, :10] = 1

        rle = binary_mask_to_rle(fake_binary_mask)
        self.assertEqual(len(rle), 4)
        self.assertEqual(rle[0], 21)
        self.assertEqual(rle[1], 45)

    def test_post_process_semantic_segmentation(self):
        image_processor = self.image_processing_class(
            num_labels=self.image_processing_tester.num_classes, max_seq_length=77, task_seq_length=77, class_info_file='ade20k_panoptic.json', num_text=self.image_processing_tester.num_text, repo_path='shi-labs/oneformer_demo', )
        outputs = self.image_processing_tester.get_fake_oneformer_outputs()

        segmentation = image_processor.post_process_semantic_segmentation(outputs)

        self.assertEqual(len(segmentation), self.image_processing_tester.batch_size)
        self.assertEqual(
            segmentation[0].shape, (
                self.image_processing_tester.height,
                self.image_processing_tester.width,
            ), )

        target_sizes = [(1, 4) for i in range(self.image_processing_tester.batch_size)]
        segmentation = image_processor.post_process_semantic_segmentation(outputs, target_sizes=target_sizes)

        self.assertEqual(segmentation[0].shape, target_sizes[0])

    def test_post_process_instance_segmentation(self):
        image_processor = self.image_processing_class(
            num_labels=self.image_processing_tester.num_classes, max_seq_length=77, task_seq_length=77, class_info_file='ade20k_panoptic.json', num_text=self.image_processing_tester.num_text, repo_path='shi-labs/oneformer_demo', )
        outputs = self.image_processing_tester.get_fake_oneformer_outputs()
        segmentation = image_processor.post_process_instance_segmentation(outputs, threshold=0)

        self.assertTrue(len(segmentation) == self.image_processing_tester.batch_size)
        for el in segmentation:
            self.assertTrue('segmentation' in el)
            self.assertTrue('segments_info' in el)
            self.assertEqual(type(el['segments_info']), list)
            self.assertEqual(
                el['segmentation'].shape, (self.image_processing_tester.height, self.image_processing_tester.width))

    def test_post_process_panoptic_segmentation(self):
        image_processor = self.image_processing_class(
            num_labels=self.image_processing_tester.num_classes, max_seq_length=77, task_seq_length=77, class_info_file='ade20k_panoptic.json', num_text=self.image_processing_tester.num_text, repo_path='shi-labs/oneformer_demo', )
        outputs = self.image_processing_tester.get_fake_oneformer_outputs()
        segmentation = image_processor.post_process_panoptic_segmentation(outputs, threshold=0)

        self.assertTrue(len(segmentation) == self.image_processing_tester.batch_size)
        for el in segmentation:
            self.assertTrue('segmentation' in el)
            self.assertTrue('segments_info' in el)
            self.assertEqual(type(el['segments_info']), list)
            self.assertEqual(
                el['segmentation'].shape, (self.image_processing_tester.height, self.image_processing_tester.width))
| 669 | 1 |
import timeit
import numpy as np
import datasets
from datasets.arrow_writer import ArrowWriter
from datasets.features.features import _ArrayXD
def lowerCamelCase_ ( lowerCAmelCase: Optional[int] )-> List[Any]:
def wrapper(*lowerCAmelCase: Tuple , **lowerCAmelCase: int ):
_snake_case : Any = timeit.default_timer()
_snake_case : Dict = func(*lowerCAmelCase , **lowerCAmelCase )
_snake_case : Optional[int] = timeit.default_timer() - starttime
return delta
_snake_case : List[str] = func.__name__
return wrapper
def lowerCamelCase_ ( lowerCAmelCase: dict , lowerCAmelCase: Dict=1_00 , lowerCAmelCase: Optional[Any]=None )-> List[Any]:
_snake_case : Optional[int] = []
_snake_case : List[str] = seq_shapes or {}
for i in range(lowerCAmelCase ):
_snake_case : Optional[Any] = {}
for col_id, (k, v) in enumerate(features.items() ):
if isinstance(lowerCAmelCase , _ArrayXD ):
_snake_case : Union[str, Any] = np.random.rand(*v.shape ).astype(v.dtype )
elif isinstance(lowerCAmelCase , datasets.Value ):
if v.dtype == "string":
_snake_case : Optional[Any] = 'The small grey turtle was surprisingly fast when challenged.'
else:
_snake_case : List[Any] = np.random.randint(10 , size=1 ).astype(v.dtype ).item()
elif isinstance(lowerCAmelCase , datasets.Sequence ):
while isinstance(lowerCAmelCase , datasets.Sequence ):
_snake_case : Optional[Any] = v.feature
_snake_case : Optional[int] = seq_shapes[k]
_snake_case : Tuple = np.random.rand(*lowerCAmelCase ).astype(v.dtype )
_snake_case : Optional[int] = data
dummy_data.append((i, example) )
return dummy_data
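# Write the dummy examples to an Arrow file, verify the row count, and load it back as a Dataset.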
def lowerCamelCase_ ( lowerCAmelCase: Any , lowerCAmelCase: List[str] , lowerCAmelCase: Dict=1_00 , lowerCAmelCase: Optional[Any]=None )-> Dict:
_snake_case : List[Any] = generate_examples(lowerCAmelCase , num_examples=lowerCAmelCase , seq_shapes=lowerCAmelCase )
with ArrowWriter(features=lowerCAmelCase , path=lowerCAmelCase ) as writer:
for key, record in dummy_data:
_snake_case : Union[str, Any] = features.encode_example(lowerCAmelCase )
writer.write(lowerCAmelCase )
_snake_case , _snake_case : List[Any] = writer.finalize()
if not num_final_examples == num_examples:
raise ValueError(
F"""Error writing the dataset, wrote {num_final_examples} examples but should have written {num_examples}.""" )
_snake_case : Optional[Any] = datasets.Dataset.from_file(filename=lowerCAmelCase , info=datasets.DatasetInfo(features=lowerCAmelCase ) )
return dataset
| 669 |
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.utils import ComputeEnvironment
from .cluster import get_cluster_input
from .config_args import cache_dir, default_config_file, default_yaml_config_file, load_config_from_file # noqa: F401
from .config_utils import _ask_field, _ask_options, _convert_compute_environment # noqa: F401
from .sagemaker import get_sagemaker_input
lowerCAmelCase_ = """Launches a series of prompts to create and save a `default_config.yaml` configuration file for your training system. Should always be ran first on your machine"""
def lowerCamelCase_ ( )-> Tuple:
_snake_case : int = _ask_options(
'In which compute environment are you running?' , ['This machine', 'AWS (Amazon SageMaker)'] , _convert_compute_environment , )
if compute_environment == ComputeEnvironment.AMAZON_SAGEMAKER:
_snake_case : int = get_sagemaker_input()
else:
_snake_case : Any = get_cluster_input()
return config
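# Build the argument parser for the config command, either standalone or as a subcommand.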
def lowerCamelCase_ ( lowerCAmelCase: str=None )-> Any:
if subparsers is not None:
_snake_case : List[Any] = subparsers.add_parser('config' , description=lowerCAmelCase )
else:
_snake_case : Dict = argparse.ArgumentParser('Accelerate config command' , description=lowerCAmelCase )
parser.add_argument(
'--config_file' , default=lowerCAmelCase , help=(
'The path to use to store the config file. Will default to a file named default_config.yaml in the cache '
'location, which is the content of the environment `HF_HOME` suffixed with \'accelerate\', or if you don\'t have '
'such an environment variable, your cache directory (\'~/.cache\' or the content of `XDG_CACHE_HOME`) suffixed '
'with \'huggingface\'.'
) , )
if subparsers is not None:
parser.set_defaults(func=lowerCAmelCase )
return parser
def lowerCamelCase_ ( lowerCAmelCase: Any )-> Any:
_snake_case : Dict = get_user_input()
if args.config_file is not None:
_snake_case : List[str] = args.config_file
else:
if not os.path.isdir(lowerCAmelCase ):
os.makedirs(lowerCAmelCase )
_snake_case : Union[str, Any] = default_yaml_config_file
if config_file.endswith('.json' ):
config.to_json_file(lowerCAmelCase )
else:
config.to_yaml_file(lowerCAmelCase )
print(F"""accelerate configuration saved at {config_file}""" )
def lowerCamelCase_ ( )-> Dict:
_snake_case : List[str] = config_command_parser()
_snake_case : str = parser.parse_args()
config_command(lowerCAmelCase )
if __name__ == "__main__":
main()
| 669 | 1 |
from ..utils import DummyObject, requires_backends
class _lowerCAmelCase ( metaclass=UpperCAmelCase_ ):
'''simple docstring'''
a_ : Optional[int] =["""note_seq"""]
def __init__( self : Tuple , *UpperCamelCase : Dict , **UpperCamelCase : Optional[int] ):
'''simple docstring'''
requires_backends(self , ['note_seq'] )
@classmethod
def UpperCamelCase_ ( cls : Union[str, Any] , *UpperCamelCase : str , **UpperCamelCase : Dict ):
'''simple docstring'''
requires_backends(cls , ['note_seq'] )
@classmethod
def UpperCamelCase_ ( cls : Optional[int] , *UpperCamelCase : List[str] , **UpperCamelCase : Union[str, Any] ):
'''simple docstring'''
requires_backends(cls , ['note_seq'] )
| 669 |
# Function to print upper half of diamond (pyramid)
def lowerCamelCase_ ( lowerCAmelCase: Optional[Any] )-> List[str]:
for i in range(0 , lowerCAmelCase ):
for _ in range(0 , n - i - 1 ): # printing spaces
print(' ' , end='' )
for _ in range(0 , i + 1 ): # printing stars
print('* ' , end='' )
print()
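# Function to print lower half of diamond (pyramid)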
def lowerCamelCase_ ( lowerCAmelCase: Optional[int] )-> List[Any]:
for i in range(lowerCAmelCase , 0 , -1 ):
for _ in range(lowerCAmelCase , 0 , -1 ): # printing stars
print('* ' , end='' )
print()
for _ in range(n - i + 1 , 0 , -1 ): # printing spaces
print(' ' , end='' )
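# Print the complete diamond: upper half followed by the reversed lower half.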
def lowerCamelCase_ ( lowerCAmelCase: Tuple )-> int:
if n <= 0:
print(' ... .... nothing printing :(' )
return
floyd(lowerCAmelCase ) # upper half
reverse_floyd(lowerCAmelCase ) # lower half
if __name__ == "__main__":
print(r"""| /\ | |- | |- |--| |\ /| |-""")
print(r"""|/ \| |- |_ |_ |__| | \/ | |_""")
lowerCAmelCase_ = 1
while K:
lowerCAmelCase_ = int(input("""enter the number and , and see the magic : """))
print()
pretty_print(user_number)
lowerCAmelCase_ = int(input("""press 0 to exit... and 1 to continue..."""))
print("""Good Bye...""")
| 669 | 1 |
import tempfile
import unittest
import numpy as np
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import BertConfig, is_flax_available
from transformers.testing_utils import TOKEN, USER, is_staging_test, require_flax
if is_flax_available():
import os
from flax.core.frozen_dict import unfreeze
from flax.traverse_util import flatten_dict
from transformers import FlaxBertModel
lowerCAmelCase_ = """0.12""" # assumed parallelism: 8
@require_flax
@is_staging_test
class _lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@classmethod
def UpperCamelCase_ ( cls : Dict ):
'''simple docstring'''
_snake_case : Union[str, Any] = TOKEN
HfFolder.save_token(UpperCamelCase )
@classmethod
def UpperCamelCase_ ( cls : Any ):
'''simple docstring'''
try:
delete_repo(token=cls._token , repo_id='test-model-flax' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='valid_org/test-model-flax-org' )
except HTTPError:
pass
def UpperCamelCase_ ( self : int ):
'''simple docstring'''
_snake_case : Dict = BertConfig(
vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 )
_snake_case : Tuple = FlaxBertModel(UpperCamelCase )
model.push_to_hub('test-model-flax' , use_auth_token=self._token )
_snake_case : str = FlaxBertModel.from_pretrained(f"""{USER}/test-model-flax""" )
_snake_case : List[str] = flatten_dict(unfreeze(model.params ) )
_snake_case : List[Any] = flatten_dict(unfreeze(new_model.params ) )
for key in base_params.keys():
_snake_case : int = (base_params[key] - new_params[key]).sum().item()
self.assertLessEqual(UpperCamelCase , 1e-3 , msg=f"""{key} not identical""" )
# Reset repo
delete_repo(token=self._token , repo_id='test-model-flax' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(UpperCamelCase , repo_id='test-model-flax' , push_to_hub=UpperCamelCase , use_auth_token=self._token )
_snake_case : str = FlaxBertModel.from_pretrained(f"""{USER}/test-model-flax""" )
_snake_case : Any = flatten_dict(unfreeze(model.params ) )
_snake_case : Optional[int] = flatten_dict(unfreeze(new_model.params ) )
for key in base_params.keys():
_snake_case : str = (base_params[key] - new_params[key]).sum().item()
self.assertLessEqual(UpperCamelCase , 1e-3 , msg=f"""{key} not identical""" )
def UpperCamelCase_ ( self : List[str] ):
'''simple docstring'''
_snake_case : Optional[Any] = BertConfig(
vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 )
_snake_case : Union[str, Any] = FlaxBertModel(UpperCamelCase )
model.push_to_hub('valid_org/test-model-flax-org' , use_auth_token=self._token )
_snake_case : int = FlaxBertModel.from_pretrained('valid_org/test-model-flax-org' )
_snake_case : Dict = flatten_dict(unfreeze(model.params ) )
_snake_case : List[str] = flatten_dict(unfreeze(new_model.params ) )
for key in base_params.keys():
_snake_case : Any = (base_params[key] - new_params[key]).sum().item()
self.assertLessEqual(UpperCamelCase , 1e-3 , msg=f"""{key} not identical""" )
# Reset repo
delete_repo(token=self._token , repo_id='valid_org/test-model-flax-org' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(
UpperCamelCase , repo_id='valid_org/test-model-flax-org' , push_to_hub=UpperCamelCase , use_auth_token=self._token )
_snake_case : Union[str, Any] = FlaxBertModel.from_pretrained('valid_org/test-model-flax-org' )
_snake_case : List[str] = flatten_dict(unfreeze(model.params ) )
_snake_case : Any = flatten_dict(unfreeze(new_model.params ) )
for key in base_params.keys():
_snake_case : Any = (base_params[key] - new_params[key]).sum().item()
self.assertLessEqual(UpperCamelCase , 1e-3 , msg=f"""{key} not identical""" )
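# Compare two Flax models parameter-by-parameter; True only if every flattened tensor matches within 1e-4.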
def lowerCamelCase_ ( lowerCAmelCase: int , lowerCAmelCase: Optional[Any] )-> Optional[int]:
_snake_case : List[Any] = True
_snake_case : Union[str, Any] = flatten_dict(modela.params )
_snake_case : str = flatten_dict(modelb.params )
for key in flat_params_a.keys():
if np.sum(np.abs(flat_params_a[key] - flat_params_b[key] ) ) > 1E-4:
_snake_case : int = False
return models_are_equal
@require_flax
class _lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def UpperCamelCase_ ( self : Tuple ):
'''simple docstring'''
_snake_case : int = BertConfig.from_pretrained('hf-internal-testing/tiny-bert-flax-only' )
_snake_case : Tuple = FlaxBertModel(UpperCamelCase )
_snake_case : List[Any] = 'bert'
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(os.path.join(UpperCamelCase , UpperCamelCase ) )
with self.assertRaises(UpperCamelCase ):
_snake_case : Any = FlaxBertModel.from_pretrained(UpperCamelCase )
_snake_case : List[str] = FlaxBertModel.from_pretrained(UpperCamelCase , subfolder=UpperCamelCase )
self.assertTrue(check_models_equal(UpperCamelCase , UpperCamelCase ) )
def UpperCamelCase_ ( self : Dict ):
'''simple docstring'''
_snake_case : List[str] = BertConfig.from_pretrained('hf-internal-testing/tiny-bert-flax-only' )
_snake_case : int = FlaxBertModel(UpperCamelCase )
_snake_case : Any = 'bert'
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(os.path.join(UpperCamelCase , UpperCamelCase ) , max_shard_size='10KB' )
with self.assertRaises(UpperCamelCase ):
_snake_case : List[Any] = FlaxBertModel.from_pretrained(UpperCamelCase )
_snake_case : Any = FlaxBertModel.from_pretrained(UpperCamelCase , subfolder=UpperCamelCase )
self.assertTrue(check_models_equal(UpperCamelCase , UpperCamelCase ) )
def UpperCamelCase_ ( self : List[str] ):
'''simple docstring'''
_snake_case : Any = 'bert'
_snake_case : Any = 'hf-internal-testing/tiny-random-bert-subfolder'
with self.assertRaises(UpperCamelCase ):
_snake_case : List[Any] = FlaxBertModel.from_pretrained(UpperCamelCase )
_snake_case : Optional[Any] = FlaxBertModel.from_pretrained(UpperCamelCase , subfolder=UpperCamelCase )
self.assertIsNotNone(UpperCamelCase )
def UpperCamelCase_ ( self : int ):
'''simple docstring'''
_snake_case : List[Any] = 'bert'
_snake_case : Tuple = 'hf-internal-testing/tiny-random-bert-sharded-subfolder'
with self.assertRaises(UpperCamelCase ):
_snake_case : List[str] = FlaxBertModel.from_pretrained(UpperCamelCase )
_snake_case : Tuple = FlaxBertModel.from_pretrained(UpperCamelCase , subfolder=UpperCamelCase )
self.assertIsNotNone(UpperCamelCase )
| 669 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase_ = logging.get_logger(__name__)
lowerCAmelCase_ = {
"""MIT/ast-finetuned-audioset-10-10-0.4593""": (
"""https://huggingface.co/MIT/ast-finetuned-audioset-10-10-0.4593/resolve/main/config.json"""
),
}
class _lowerCAmelCase ( UpperCAmelCase_ ):
'''simple docstring'''
a_ : Tuple ="""audio-spectrogram-transformer"""
def __init__( self : List[Any] , UpperCamelCase : Union[str, Any]=7_68 , UpperCamelCase : int=12 , UpperCamelCase : str=12 , UpperCamelCase : Tuple=30_72 , UpperCamelCase : Optional[Any]="gelu" , UpperCamelCase : Any=0.0 , UpperCamelCase : Dict=0.0 , UpperCamelCase : List[Any]=0.02 , UpperCamelCase : Dict=1e-1_2 , UpperCamelCase : str=16 , UpperCamelCase : List[Any]=True , UpperCamelCase : Any=10 , UpperCamelCase : Optional[int]=10 , UpperCamelCase : int=10_24 , UpperCamelCase : Optional[Any]=1_28 , **UpperCamelCase : Optional[Any] , ):
'''simple docstring'''
super().__init__(**UpperCamelCase )
_snake_case : Tuple = hidden_size
_snake_case : str = num_hidden_layers
_snake_case : Optional[Any] = num_attention_heads
_snake_case : Optional[Any] = intermediate_size
_snake_case : Optional[Any] = hidden_act
_snake_case : List[str] = hidden_dropout_prob
_snake_case : Union[str, Any] = attention_probs_dropout_prob
_snake_case : Any = initializer_range
_snake_case : List[str] = layer_norm_eps
_snake_case : int = patch_size
_snake_case : List[str] = qkv_bias
_snake_case : int = frequency_stride
_snake_case : List[Any] = time_stride
_snake_case : List[Any] = max_length
_snake_case : List[str] = num_mel_bins
| 669 | 1 |
from queue import Queue
from typing import TYPE_CHECKING, Optional
if TYPE_CHECKING:
from ..models.auto import AutoTokenizer
class _lowerCAmelCase :
'''simple docstring'''
def UpperCamelCase_ ( self : List[str] , UpperCamelCase : Any ):
'''simple docstring'''
raise NotImplementedError()
def UpperCamelCase_ ( self : List[Any] ):
'''simple docstring'''
raise NotImplementedError()
class _lowerCAmelCase ( UpperCAmelCase_ ):
'''simple docstring'''
def __init__( self : Any , UpperCamelCase : "AutoTokenizer" , UpperCamelCase : bool = False , **UpperCamelCase : Any ):
'''simple docstring'''
_snake_case : Union[str, Any] = tokenizer
_snake_case : Union[str, Any] = skip_prompt
_snake_case : List[Any] = decode_kwargs
# variables used in the streaming process
_snake_case : List[Any] = []
_snake_case : Tuple = 0
_snake_case : int = True
def UpperCamelCase_ ( self : Dict , UpperCamelCase : Optional[int] ):
'''simple docstring'''
if len(value.shape ) > 1 and value.shape[0] > 1:
raise ValueError('TextStreamer only supports batch size 1' )
elif len(value.shape ) > 1:
_snake_case : Union[str, Any] = value[0]
if self.skip_prompt and self.next_tokens_are_prompt:
_snake_case : Dict = False
return
# Add the new token to the cache and decodes the entire thing.
self.token_cache.extend(value.tolist() )
_snake_case : Union[str, Any] = self.tokenizer.decode(self.token_cache , **self.decode_kwargs )
# After the symbol for a new line, we flush the cache.
if text.endswith('\n' ):
_snake_case : Tuple = text[self.print_len :]
_snake_case : str = []
_snake_case : Optional[int] = 0
# If the last token is a CJK character, we print the characters.
elif len(UpperCamelCase ) > 0 and self._is_chinese_char(ord(text[-1] ) ):
_snake_case : str = text[self.print_len :]
self.print_len += len(UpperCamelCase )
# Otherwise, prints until the last space char (simple heuristic to avoid printing incomplete words,
# which may change with the subsequent token -- there are probably smarter ways to do this!)
else:
_snake_case : Optional[int] = text[self.print_len : text.rfind(' ' ) + 1]
self.print_len += len(UpperCamelCase )
self.on_finalized_text(UpperCamelCase )
def UpperCamelCase_ ( self : Dict ):
'''simple docstring'''
if len(self.token_cache ) > 0:
_snake_case : Dict = self.tokenizer.decode(self.token_cache , **self.decode_kwargs )
_snake_case : List[str] = text[self.print_len :]
_snake_case : Any = []
_snake_case : List[str] = 0
else:
_snake_case : Optional[int] = ''
_snake_case : int = True
self.on_finalized_text(UpperCamelCase , stream_end=UpperCamelCase )
def UpperCamelCase_ ( self : Tuple , UpperCamelCase : str , UpperCamelCase : bool = False ):
'''simple docstring'''
print(UpperCamelCase , flush=UpperCamelCase , end='' if not stream_end else None )
def UpperCamelCase_ ( self : Optional[Any] , UpperCamelCase : Optional[Any] ):
'''simple docstring'''
if (
(cp >= 0x4e00 and cp <= 0x9fff) # CJK Unified Ideographs
or (cp >= 0x3400 and cp <= 0x4dbf) # CJK Unified Ideographs Extension A
or (cp >= 0x20000 and cp <= 0x2a6df) # CJK Unified Ideographs Extension B
or (cp >= 0x2a700 and cp <= 0x2b73f) # CJK Unified Ideographs Extension C
or (cp >= 0x2b740 and cp <= 0x2b81f) # CJK Unified Ideographs Extension D
or (cp >= 0x2b820 and cp <= 0x2ceaf) # CJK Unified Ideographs Extension E
or (cp >= 0xf900 and cp <= 0xfaff) # CJK Compatibility Ideographs
or (cp >= 0x2f800 and cp <= 0x2fa1f) # CJK Compatibility Ideographs Supplement
):
return True
return False
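# Iterator variant: finalized text is pushed onto a queue so a consumer thread can stream tokens as they arrive.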
class _lowerCAmelCase ( UpperCAmelCase_ ):
'''simple docstring'''
def __init__( self : Union[str, Any] , UpperCamelCase : "AutoTokenizer" , UpperCamelCase : bool = False , UpperCamelCase : Optional[float] = None , **UpperCamelCase : Optional[Any] ):
'''simple docstring'''
super().__init__(UpperCamelCase , UpperCamelCase , **UpperCamelCase )
_snake_case : int = Queue()
_snake_case : Tuple = None
_snake_case : Optional[Any] = timeout
def UpperCamelCase_ ( self : List[str] , UpperCamelCase : str , UpperCamelCase : bool = False ):
'''simple docstring'''
self.text_queue.put(UpperCamelCase , timeout=self.timeout )
if stream_end:
self.text_queue.put(self.stop_signal , timeout=self.timeout )
def __iter__( self : Union[str, Any] ):
'''simple docstring'''
return self
def UpperCamelCase_ ( self : Any ):
'''simple docstring'''
_snake_case : str = self.text_queue.get(timeout=self.timeout )
if value == self.stop_signal:
raise StopIteration()
else:
return value
| 669 |
import math
import random
from typing import Any
from .hill_climbing import SearchProblem
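# Simulated annealing: worse neighbors are accepted with probability e^(change / temperature), which shrinks as the temperature decays.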
def lowerCamelCase_ ( lowerCAmelCase: Tuple , lowerCAmelCase: bool = True , lowerCAmelCase: float = math.inf , lowerCAmelCase: float = -math.inf , lowerCAmelCase: float = math.inf , lowerCAmelCase: float = -math.inf , lowerCAmelCase: bool = False , lowerCAmelCase: float = 1_00 , lowerCAmelCase: float = 0.0_1 , lowerCAmelCase: float = 1 , )-> Any:
_snake_case : int = False
_snake_case : Any = search_prob
_snake_case : Tuple = start_temperate
_snake_case : Any = []
_snake_case : List[str] = 0
_snake_case : Optional[Any] = None
while not search_end:
_snake_case : List[Any] = current_state.score()
if best_state is None or current_score > best_state.score():
_snake_case : Dict = current_state
scores.append(lowerCAmelCase )
iterations += 1
_snake_case : Optional[int] = None
_snake_case : Union[str, Any] = current_state.get_neighbors()
while (
next_state is None and neighbors
): # till we do not find a neighbor that we can move to
_snake_case : Dict = random.randint(0 , len(lowerCAmelCase ) - 1 ) # picking a random neighbor
_snake_case : int = neighbors.pop(lowerCAmelCase )
_snake_case : Union[str, Any] = picked_neighbor.score() - current_score
if (
picked_neighbor.x > max_x
or picked_neighbor.x < min_x
or picked_neighbor.y > max_y
or picked_neighbor.y < min_y
):
continue # neighbor outside our bounds
if not find_max:
_snake_case : Union[str, Any] = change * -1 # in case we are finding minimum
if change > 0: # improves the solution
_snake_case : Union[str, Any] = picked_neighbor
else:
_snake_case : Optional[Any] = (math.e) ** (
change / current_temp
) # probability generation function
if random.random() < probability: # random number within probability
_snake_case : int = picked_neighbor
_snake_case : List[Any] = current_temp - (current_temp * rate_of_decrease)
if current_temp < threshold_temp or next_state is None:
# temperature below threshold, or could not find a suitable neighbor
_snake_case : List[str] = True
else:
_snake_case : Union[str, Any] = next_state
if visualization:
from matplotlib import pyplot as plt
plt.plot(range(lowerCAmelCase ) , lowerCAmelCase )
plt.xlabel('Iterations' )
plt.ylabel('Function values' )
plt.show()
return best_state
if __name__ == "__main__":
def lowerCamelCase_ ( lowerCAmelCase: Any , lowerCAmelCase: List[Any] )-> List[Any]:
return (x**2) + (y**2)
# starting the problem with initial coordinates (12, 47)
lowerCAmelCase_ = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_fa)
lowerCAmelCase_ = simulated_annealing(
prob, find_max=False, max_x=100, min_x=5, max_y=50, min_y=-5, visualization=True
)
print(
"""The minimum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 """
F"""and 50 > y > - 5 found via hill climbing: {local_min.score()}"""
)
# starting the problem with initial coordinates (12, 47)
lowerCAmelCase_ = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_fa)
lowerCAmelCase_ = simulated_annealing(
prob, find_max=True, max_x=100, min_x=5, max_y=50, min_y=-5, visualization=True
)
print(
"""The maximum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 """
F"""and 50 > y > - 5 found via hill climbing: {local_min.score()}"""
)
def lowerCamelCase_ ( lowerCAmelCase: Any , lowerCAmelCase: Dict )-> Dict:
return (3 * x**2) - (6 * y)
lowerCAmelCase_ = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_fa)
lowerCAmelCase_ = simulated_annealing(prob, find_max=False, visualization=True)
print(
"""The minimum score for f(x, y) = 3*x^2 - 6*y found via hill climbing: """
F"""{local_min.score()}"""
)
lowerCAmelCase_ = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_fa)
lowerCAmelCase_ = simulated_annealing(prob, find_max=True, visualization=True)
print(
"""The maximum score for f(x, y) = 3*x^2 - 6*y found via hill climbing: """
F"""{local_min.score()}"""
)
| 669 | 1 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCAmelCase_ = logging.get_logger(__name__)
lowerCAmelCase_ = {
"""google/mobilenet_v2_1.4_224""": """https://huggingface.co/google/mobilenet_v2_1.4_224/resolve/main/config.json""",
"""google/mobilenet_v2_1.0_224""": """https://huggingface.co/google/mobilenet_v2_1.0_224/resolve/main/config.json""",
"""google/mobilenet_v2_0.75_160""": """https://huggingface.co/google/mobilenet_v2_0.75_160/resolve/main/config.json""",
"""google/mobilenet_v2_0.35_96""": """https://huggingface.co/google/mobilenet_v2_0.35_96/resolve/main/config.json""",
# See all MobileNetV2 models at https://huggingface.co/models?filter=mobilenet_v2
}
class _lowerCAmelCase ( UpperCAmelCase_ ):
'''simple docstring'''
a_ : Optional[int] ="""mobilenet_v2"""
def __init__( self : Dict , UpperCamelCase : str=3 , UpperCamelCase : int=2_24 , UpperCamelCase : Any=1.0 , UpperCamelCase : Optional[Any]=8 , UpperCamelCase : Tuple=8 , UpperCamelCase : List[str]=6 , UpperCamelCase : str=32 , UpperCamelCase : int=True , UpperCamelCase : str=True , UpperCamelCase : Dict="relu6" , UpperCamelCase : int=True , UpperCamelCase : Tuple=0.8 , UpperCamelCase : Tuple=0.02 , UpperCamelCase : List[Any]=0.0_01 , UpperCamelCase : List[Any]=2_55 , **UpperCamelCase : List[str] , ):
'''simple docstring'''
super().__init__(**UpperCamelCase )
if depth_multiplier <= 0:
raise ValueError('depth_multiplier must be greater than zero.' )
_snake_case : List[str] = num_channels
_snake_case : Tuple = image_size
_snake_case : Any = depth_multiplier
_snake_case : Tuple = depth_divisible_by
_snake_case : Union[str, Any] = min_depth
_snake_case : Tuple = expand_ratio
_snake_case : Dict = output_stride
_snake_case : List[Any] = first_layer_is_expansion
_snake_case : Union[str, Any] = finegrained_output
_snake_case : Dict = hidden_act
_snake_case : Any = tf_padding
_snake_case : str = classifier_dropout_prob
_snake_case : Optional[int] = initializer_range
_snake_case : List[Any] = layer_norm_eps
_snake_case : List[str] = semantic_loss_ignore_index
class _lowerCAmelCase ( UpperCAmelCase_ ):
'''simple docstring'''
a_ : Union[str, Any] =version.parse("""1.11""" )
@property
def UpperCamelCase_ ( self : List[str] ):
'''simple docstring'''
return OrderedDict([('pixel_values', {0: 'batch'})] )
@property
def UpperCamelCase_ ( self : Tuple ):
'''simple docstring'''
if self.task == "image-classification":
return OrderedDict([('logits', {0: 'batch'})] )
else:
return OrderedDict([('last_hidden_state', {0: 'batch'}), ('pooler_output', {0: 'batch'})] )
@property
def UpperCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
return 1e-4
| 669 |
from dataclasses import dataclass
from typing import Dict, Optional, Union
import torch
import torch.nn.functional as F
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .attention import BasicTransformerBlock
from .attention_processor import AttentionProcessor, AttnProcessor
from .embeddings import TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin
@dataclass
class _lowerCAmelCase ( UpperCAmelCase_ ):
'''simple docstring'''
a_ : torch.FloatTensor
class _lowerCAmelCase ( UpperCAmelCase_ , UpperCAmelCase_ ):
'''simple docstring'''
@register_to_config
def __init__( self : str , UpperCamelCase : int = 32 , UpperCamelCase : int = 64 , UpperCamelCase : int = 20 , UpperCamelCase : int = 7_68 , UpperCamelCase : Optional[int]=77 , UpperCamelCase : int=4 , UpperCamelCase : float = 0.0 , UpperCamelCase : str = "silu" , UpperCamelCase : Optional[str] = None , UpperCamelCase : Optional[str] = None , UpperCamelCase : Optional[str] = "linear" , UpperCamelCase : Optional[str] = "prd" , UpperCamelCase : Optional[int] = None , UpperCamelCase : Optional[int] = None , UpperCamelCase : Optional[int] = None , ):
'''simple docstring'''
super().__init__()
_snake_case : str = num_attention_heads
_snake_case : Optional[int] = attention_head_dim
_snake_case : Any = num_attention_heads * attention_head_dim
_snake_case : List[Any] = additional_embeddings
_snake_case : List[str] = time_embed_dim or inner_dim
_snake_case : int = embedding_proj_dim or embedding_dim
_snake_case : List[Any] = clip_embed_dim or embedding_dim
_snake_case : Optional[Any] = Timesteps(UpperCamelCase , UpperCamelCase , 0 )
_snake_case : List[Any] = TimestepEmbedding(UpperCamelCase , UpperCamelCase , out_dim=UpperCamelCase , act_fn=UpperCamelCase )
_snake_case : Optional[int] = nn.Linear(UpperCamelCase , UpperCamelCase )
if embedding_proj_norm_type is None:
_snake_case : str = None
elif embedding_proj_norm_type == "layer":
_snake_case : List[Any] = nn.LayerNorm(UpperCamelCase )
else:
raise ValueError(f"""unsupported embedding_proj_norm_type: {embedding_proj_norm_type}""" )
_snake_case : str = nn.Linear(UpperCamelCase , UpperCamelCase )
if encoder_hid_proj_type is None:
_snake_case : Any = None
elif encoder_hid_proj_type == "linear":
_snake_case : Optional[int] = nn.Linear(UpperCamelCase , UpperCamelCase )
else:
raise ValueError(f"""unsupported encoder_hid_proj_type: {encoder_hid_proj_type}""" )
_snake_case : List[str] = nn.Parameter(torch.zeros(1 , num_embeddings + additional_embeddings , UpperCamelCase ) )
if added_emb_type == "prd":
_snake_case : str = nn.Parameter(torch.zeros(1 , 1 , UpperCamelCase ) )
elif added_emb_type is None:
_snake_case : Dict = None
else:
raise ValueError(
f"""`added_emb_type`: {added_emb_type} is not supported. Make sure to choose one of `'prd'` or `None`.""" )
_snake_case : Optional[int] = nn.ModuleList(
[
BasicTransformerBlock(
UpperCamelCase , UpperCamelCase , UpperCamelCase , dropout=UpperCamelCase , activation_fn='gelu' , attention_bias=UpperCamelCase , )
for d in range(UpperCamelCase )
] )
if norm_in_type == "layer":
_snake_case : Optional[int] = nn.LayerNorm(UpperCamelCase )
elif norm_in_type is None:
_snake_case : Optional[Any] = None
else:
raise ValueError(f"""Unsupported norm_in_type: {norm_in_type}.""" )
_snake_case : Optional[Any] = nn.LayerNorm(UpperCamelCase )
_snake_case : Union[str, Any] = nn.Linear(UpperCamelCase , UpperCamelCase )
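# Causal attention mask: positions above the diagonal get a large negative value so tokens cannot attend to the future.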
_snake_case : List[Any] = torch.full(
[num_embeddings + additional_embeddings, num_embeddings + additional_embeddings] , -1_00_00.0 )
causal_attention_mask.triu_(1 )
_snake_case : Optional[Any] = causal_attention_mask[None, ...]
self.register_buffer('causal_attention_mask' , UpperCamelCase , persistent=UpperCamelCase )
_snake_case : str = nn.Parameter(torch.zeros(1 , UpperCamelCase ) )
_snake_case : List[str] = nn.Parameter(torch.zeros(1 , UpperCamelCase ) )
@property
# Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors
def UpperCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
_snake_case : Optional[Any] = {}
def fn_recursive_add_processors(UpperCamelCase : str , UpperCamelCase : torch.nn.Module , UpperCamelCase : Dict[str, AttentionProcessor] ):
if hasattr(UpperCamelCase , 'set_processor' ):
_snake_case : Tuple = module.processor
for sub_name, child in module.named_children():
fn_recursive_add_processors(f"""{name}.{sub_name}""" , UpperCamelCase , UpperCamelCase )
return processors
for name, module in self.named_children():
fn_recursive_add_processors(UpperCamelCase , UpperCamelCase , UpperCamelCase )
return processors
def UpperCamelCase_ ( self : List[Any] , UpperCamelCase : Union[AttentionProcessor, Dict[str, AttentionProcessor]] ):
'''simple docstring'''
_snake_case : Optional[int] = len(self.attn_processors.keys() )
if isinstance(UpperCamelCase , UpperCamelCase ) and len(UpperCamelCase ) != count:
raise ValueError(
f"""A dict of processors was passed, but the number of processors {len(UpperCamelCase )} does not match the"""
f""" number of attention layers: {count}. Please make sure to pass {count} processor classes.""" )
def fn_recursive_attn_processor(UpperCamelCase : str , UpperCamelCase : torch.nn.Module , UpperCamelCase : Union[str, Any] ):
if hasattr(UpperCamelCase , 'set_processor' ):
if not isinstance(UpperCamelCase , UpperCamelCase ):
module.set_processor(UpperCamelCase )
else:
module.set_processor(processor.pop(f"""{name}.processor""" ) )
for sub_name, child in module.named_children():
fn_recursive_attn_processor(f"""{name}.{sub_name}""" , UpperCamelCase , UpperCamelCase )
for name, module in self.named_children():
fn_recursive_attn_processor(UpperCamelCase , UpperCamelCase , UpperCamelCase )
def UpperCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
self.set_attn_processor(AttnProcessor() )
def UpperCamelCase_ ( self : Optional[Any] , UpperCamelCase : Any , UpperCamelCase : Union[torch.Tensor, float, int] , UpperCamelCase : torch.FloatTensor , UpperCamelCase : Optional[torch.FloatTensor] = None , UpperCamelCase : Optional[torch.BoolTensor] = None , UpperCamelCase : bool = True , ):
'''simple docstring'''
_snake_case : Dict = hidden_states.shape[0]
_snake_case : str = timestep
if not torch.is_tensor(UpperCamelCase ):
_snake_case : Dict = torch.tensor([timesteps] , dtype=torch.long , device=hidden_states.device )
elif torch.is_tensor(UpperCamelCase ) and len(timesteps.shape ) == 0:
_snake_case : Tuple = timesteps[None].to(hidden_states.device )
# broadcast to batch dimension in a way that's compatible with ONNX/Core ML
_snake_case : Optional[int] = timesteps * torch.ones(UpperCamelCase , dtype=timesteps.dtype , device=timesteps.device )
_snake_case : Union[str, Any] = self.time_proj(UpperCamelCase )
# timesteps does not contain any weights and will always return f32 tensors
# but time_embedding might be fp16, so we need to cast here.
_snake_case : Tuple = timesteps_projected.to(dtype=self.dtype )
_snake_case : List[Any] = self.time_embedding(UpperCamelCase )
if self.embedding_proj_norm is not None:
_snake_case : Optional[Any] = self.embedding_proj_norm(UpperCamelCase )
_snake_case : Union[str, Any] = self.embedding_proj(UpperCamelCase )
if self.encoder_hidden_states_proj is not None and encoder_hidden_states is not None:
_snake_case : Dict = self.encoder_hidden_states_proj(UpperCamelCase )
elif self.encoder_hidden_states_proj is not None and encoder_hidden_states is None:
raise ValueError('`encoder_hidden_states_proj` requires `encoder_hidden_states` to be set' )
_snake_case : str = self.proj_in(UpperCamelCase )
_snake_case : int = self.positional_embedding.to(hidden_states.dtype )
_snake_case : Optional[int] = []
_snake_case : List[Any] = 0
if encoder_hidden_states is not None:
additional_embeds.append(UpperCamelCase )
additional_embeddings_len += encoder_hidden_states.shape[1]
if len(proj_embeddings.shape ) == 2:
_snake_case : str = proj_embeddings[:, None, :]
if len(hidden_states.shape ) == 2:
_snake_case : str = hidden_states[:, None, :]
_snake_case : str = additional_embeds + [
proj_embeddings,
time_embeddings[:, None, :],
hidden_states,
]
if self.prd_embedding is not None:
_snake_case : int = self.prd_embedding.to(hidden_states.dtype ).expand(UpperCamelCase , -1 , -1 )
additional_embeds.append(UpperCamelCase )
_snake_case : Optional[int] = torch.cat(
UpperCamelCase , dim=1 , )
# Allow positional_embedding to not include the `additional_embeddings` and instead pad it with zeros for these additional tokens
_snake_case : Tuple = additional_embeddings_len + proj_embeddings.shape[1] + 1
if positional_embeddings.shape[1] < hidden_states.shape[1]:
_snake_case : Optional[Any] = F.pad(
UpperCamelCase , (
0,
0,
additional_embeddings_len,
self.prd_embedding.shape[1] if self.prd_embedding is not None else 0,
) , value=0.0 , )
_snake_case : Optional[Any] = hidden_states + positional_embeddings
if attention_mask is not None:
_snake_case : Any = (1 - attention_mask.to(hidden_states.dtype )) * -1_00_00.0
_snake_case : Tuple = F.pad(UpperCamelCase , (0, self.additional_embeddings) , value=0.0 )
_snake_case : int = (attention_mask[:, None, :] + self.causal_attention_mask).to(hidden_states.dtype )
_snake_case : str = attention_mask.repeat_interleave(self.config.num_attention_heads , dim=0 )
if self.norm_in is not None:
_snake_case : Tuple = self.norm_in(UpperCamelCase )
for block in self.transformer_blocks:
_snake_case : Any = block(UpperCamelCase , attention_mask=UpperCamelCase )
_snake_case : Dict = self.norm_out(UpperCamelCase )
if self.prd_embedding is not None:
_snake_case : str = hidden_states[:, -1]
else:
_snake_case : Any = hidden_states[:, additional_embeddings_len:]
_snake_case : List[Any] = self.proj_to_clip_embeddings(UpperCamelCase )
if not return_dict:
return (predicted_image_embedding,)
return PriorTransformerOutput(predicted_image_embedding=UpperCamelCase )
def UpperCamelCase_ ( self : Tuple , UpperCamelCase : Union[str, Any] ):
'''simple docstring'''
_snake_case : List[Any] = (prior_latents * self.clip_std) + self.clip_mean
return prior_latents
| 669 | 1 |
import tempfile
import unittest
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer
from transformers.testing_utils import (
is_torch_available,
require_optimum,
require_torch,
slow,
)
if is_torch_available():
import torch
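# Round-trip check: convert to BetterTransformer, generate, reverse the conversion, save, reload, and compare outputs.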
@require_torch
@require_optimum
@slow
class _lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def UpperCamelCase_ ( self : List[Any] ):
'''simple docstring'''
_snake_case : int = 'hf-internal-testing/tiny-random-t5'
_snake_case : List[str] = AutoTokenizer.from_pretrained(UpperCamelCase )
_snake_case : Any = AutoModelForSeqaSeqLM.from_pretrained(UpperCamelCase )
_snake_case : Tuple = tokenizer('This is me' , return_tensors='pt' )
_snake_case : Union[str, Any] = model.to_bettertransformer()
self.assertTrue(any('BetterTransformer' in mod.__class__.__name__ for _, mod in model.named_modules() ) )
_snake_case : Any = model.generate(**UpperCamelCase )
_snake_case : List[Any] = model.reverse_bettertransformer()
self.assertFalse(any('BetterTransformer' in mod.__class__.__name__ for _, mod in model.named_modules() ) )
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(UpperCamelCase )
_snake_case : str = AutoModelForSeqaSeqLM.from_pretrained(UpperCamelCase )
self.assertFalse(
any('BetterTransformer' in mod.__class__.__name__ for _, mod in model_reloaded.named_modules() ) )
_snake_case : Optional[Any] = model_reloaded.generate(**UpperCamelCase )
self.assertTrue(torch.allclose(UpperCamelCase , UpperCamelCase ) )
def UpperCamelCase_ ( self : Dict ):
'''simple docstring'''
_snake_case : str = 'hf-internal-testing/tiny-random-t5'
_snake_case : Union[str, Any] = AutoModelForSeqaSeqLM.from_pretrained(UpperCamelCase )
_snake_case : List[Any] = model.to_bettertransformer()
with tempfile.TemporaryDirectory() as tmpdirname:
with self.assertRaises(UpperCamelCase ):
model.save_pretrained(UpperCamelCase )
_snake_case : Optional[int] = model.reverse_bettertransformer()
model.save_pretrained(UpperCamelCase )
| 669 |
def lowerCamelCase_ ( lowerCAmelCase: int )-> int:
if not isinstance(lowerCAmelCase , lowerCAmelCase ):
_snake_case : Union[str, Any] = F"""Input value of [number={number}] must be an integer"""
raise TypeError(lowerCAmelCase )
if number < 1:
_snake_case : int = F"""Input value of [number={number}] must be > 0"""
raise ValueError(lowerCAmelCase )
_snake_case : int = 1
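# Apply the Catalan recurrence C(i) = C(i - 1) * (4 * i - 2) // (i + 1) on each iteration.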
for i in range(1 , lowerCAmelCase ):
current_number *= 4 * i - 2
current_number //= i + 1
return current_number
if __name__ == "__main__":
import doctest
doctest.testmod()
| 669 | 1 |
def lowerCamelCase_ ( lowerCAmelCase: int )-> list:
_snake_case : List[Any] = int(lowerCAmelCase )
if n_element < 1:
_snake_case : int = ValueError('n_element should be a positive number' )
raise my_error
_snake_case : Union[str, Any] = [1]
_snake_case , _snake_case , _snake_case : Any = (0, 0, 0)
_snake_case : str = 1
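# Three indices track the next multiples of 2, 3 and 5; the smallest new candidate is appended each round.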
while index < n_element:
while hamming_list[i] * 2 <= hamming_list[-1]:
i += 1
while hamming_list[j] * 3 <= hamming_list[-1]:
j += 1
while hamming_list[k] * 5 <= hamming_list[-1]:
k += 1
hamming_list.append(
min(hamming_list[i] * 2 , hamming_list[j] * 3 , hamming_list[k] * 5 ) )
index += 1
return hamming_list
if __name__ == "__main__":
lowerCAmelCase_ = input("""Enter the last number (nth term) of the Hamming Number Series: """)
print("""Formula of Hamming Number Series => 2^i * 3^j * 5^k""")
lowerCAmelCase_ = hamming(int(n))
print("""-----------------------------------------------------""")
print(F"""The list with nth numbers is: {hamming_numbers}""")
print("""-----------------------------------------------------""")
| 669 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_lxmert import LxmertTokenizer
lowerCAmelCase_ = {"""vocab_file""": """vocab.txt""", """tokenizer_file""": """tokenizer.json"""}
lowerCAmelCase_ = {
"""vocab_file""": {
"""unc-nlp/lxmert-base-uncased""": """https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/vocab.txt""",
},
"""tokenizer_file""": {
"""unc-nlp/lxmert-base-uncased""": (
"""https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/tokenizer.json"""
),
},
}
lowerCAmelCase_ = {
"""unc-nlp/lxmert-base-uncased""": 512,
}
lowerCAmelCase_ = {
"""unc-nlp/lxmert-base-uncased""": {"""do_lower_case""": True},
}
class _lowerCAmelCase ( UpperCAmelCase_ ):
'''simple docstring'''
a_ : List[Any] =VOCAB_FILES_NAMES
a_ : Tuple =PRETRAINED_VOCAB_FILES_MAP
a_ : Optional[Any] =PRETRAINED_INIT_CONFIGURATION
a_ : Any =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a_ : Any =LxmertTokenizer
def __init__( self : Any , UpperCamelCase : Union[str, Any]=None , UpperCamelCase : Dict=None , UpperCamelCase : List[str]=True , UpperCamelCase : List[str]="[UNK]" , UpperCamelCase : List[Any]="[SEP]" , UpperCamelCase : List[Any]="[PAD]" , UpperCamelCase : Optional[Any]="[CLS]" , UpperCamelCase : Optional[int]="[MASK]" , UpperCamelCase : Optional[int]=True , UpperCamelCase : str=None , **UpperCamelCase : List[str] , ):
'''simple docstring'''
super().__init__(
UpperCamelCase , tokenizer_file=UpperCamelCase , do_lower_case=UpperCamelCase , unk_token=UpperCamelCase , sep_token=UpperCamelCase , pad_token=UpperCamelCase , cls_token=UpperCamelCase , mask_token=UpperCamelCase , tokenize_chinese_chars=UpperCamelCase , strip_accents=UpperCamelCase , **UpperCamelCase , )
_snake_case : Tuple = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get('lowercase' , UpperCamelCase ) != do_lower_case
or normalizer_state.get('strip_accents' , UpperCamelCase ) != strip_accents
or normalizer_state.get('handle_chinese_chars' , UpperCamelCase ) != tokenize_chinese_chars
):
_snake_case : List[Any] = getattr(UpperCamelCase , normalizer_state.pop('type' ) )
_snake_case : Optional[int] = do_lower_case
_snake_case : Dict = strip_accents
_snake_case : Optional[int] = tokenize_chinese_chars
_snake_case : Optional[Any] = normalizer_class(**UpperCamelCase )
_snake_case : int = do_lower_case
def UpperCamelCase_ ( self : int , UpperCamelCase : List[str] , UpperCamelCase : str=None ):
'''simple docstring'''
_snake_case : List[Any] = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def UpperCamelCase_ ( self : List[str] , UpperCamelCase : List[int] , UpperCamelCase : Optional[List[int]] = None ):
'''simple docstring'''
_snake_case : Tuple = [self.sep_token_id]
_snake_case : List[str] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def UpperCamelCase_ ( self : int , UpperCamelCase : str , UpperCamelCase : Optional[str] = None ):
'''simple docstring'''
_snake_case : int = self._tokenizer.model.save(UpperCamelCase , name=UpperCamelCase )
return tuple(UpperCamelCase )
| 669 | 1 |
import os
import posixpath
import shutil
import uuid
from dataclasses import dataclass
from typing import TYPE_CHECKING, Iterable, List, Optional, Tuple, Union
import numpy as np
import pyarrow as pa
import datasets
from datasets.arrow_writer import ArrowWriter, ParquetWriter
from datasets.config import MAX_SHARD_SIZE
from datasets.filesystems import (
is_remote_filesystem,
rename,
)
from datasets.iterable_dataset import _BaseExamplesIterable
from datasets.utils.py_utils import convert_file_size_to_int
lowerCAmelCase_ = datasets.utils.logging.get_logger(__name__)
if TYPE_CHECKING:
import pyspark
@dataclass
class _lowerCAmelCase ( datasets.BuilderConfig ):
'''simple docstring'''
a_ : Optional[datasets.Features] =None
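# Yield examples one Spark partition at a time, following partition_order, so shuffling and sharding stay deterministic.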
def lowerCamelCase_ ( lowerCAmelCase: "pyspark.sql.DataFrame" , lowerCAmelCase: List[int] , )-> Tuple:
import pyspark
def generate_fn():
_snake_case : Tuple = df.select('*' , pyspark.sql.functions.spark_partition_id().alias('part_id' ) )
for partition_id in partition_order:
_snake_case : Any = df_with_partition_id.select('*' ).where(F"""part_id = {partition_id}""" ).drop('part_id' )
_snake_case : Optional[int] = partition_df.collect()
_snake_case : Optional[Any] = 0
for row in rows:
yield F"""{partition_id}_{row_id}""", row.asDict()
row_id += 1
return generate_fn
class _lowerCAmelCase ( _BaseExamplesIterable ):
'''simple docstring'''
def __init__( self : str , UpperCamelCase : "pyspark.sql.DataFrame" , UpperCamelCase : Tuple=None , ):
'''simple docstring'''
_snake_case : int = df
_snake_case : List[str] = partition_order or range(self.df.rdd.getNumPartitions() )
_snake_case : Union[str, Any] = _generate_iterable_examples(self.df , self.partition_order )
def __iter__( self : Union[str, Any] ):
'''simple docstring'''
yield from self.generate_examples_fn()
def UpperCamelCase_ ( self : Tuple , UpperCamelCase : np.random.Generator ):
'''simple docstring'''
_snake_case : List[Any] = list(range(self.df.rdd.getNumPartitions() ) )
generator.shuffle(UpperCamelCase )
return SparkExamplesIterable(self.df , partition_order=UpperCamelCase )
def UpperCamelCase_ ( self : List[str] , UpperCamelCase : int , UpperCamelCase : int ):
'''simple docstring'''
_snake_case : List[Any] = self.split_shard_indices_by_worker(UpperCamelCase , UpperCamelCase )
return SparkExamplesIterable(self.df , partition_order=UpperCamelCase )
@property
def UpperCamelCase_ ( self : Dict ):
'''simple docstring'''
return len(self.partition_order )
class _lowerCAmelCase ( datasets.DatasetBuilder ):
'''simple docstring'''
a_ : int =SparkConfig
def __init__( self : str , UpperCamelCase : "pyspark.sql.DataFrame" , UpperCamelCase : str = None , UpperCamelCase : str = None , **UpperCamelCase : Union[str, Any] , ):
'''simple docstring'''
import pyspark
_snake_case : Optional[int] = pyspark.sql.SparkSession.builder.getOrCreate()
_snake_case : str = df
_snake_case : Optional[Any] = working_dir
super().__init__(
cache_dir=UpperCamelCase , config_name=str(self.df.semanticHash() ) , **UpperCamelCase , )
def UpperCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
def create_cache_and_write_probe(UpperCamelCase : int ):
# makedirs with exist_ok will recursively create the directory. It will not throw an error if directories
# already exist.
os.makedirs(self._cache_dir , exist_ok=UpperCamelCase )
_snake_case : List[str] = os.path.join(self._cache_dir , 'fs_test' + uuid.uuid4().hex )
# Opening the file in append mode will create a new file unless it already exists, in which case it will not
# change the file contents.
open(UpperCamelCase , 'a' )
return [probe_file]
if self._spark.conf.get('spark.master' , '' ).startswith('local' ):
return
# If the cluster is multi-node, make sure that the user provided a cache_dir and that it is on an NFS
# accessible to the driver.
# TODO: Stream batches to the driver using ArrowCollectSerializer instead of throwing an error.
if self._cache_dir:
_snake_case : str = (
self._spark.sparkContext.parallelize(range(1 ) , 1 ).mapPartitions(UpperCamelCase ).collect()
)
if os.path.isfile(probe[0] ):
return
raise ValueError(
'When using Dataset.from_spark on a multi-node cluster, the driver and all workers should be able to access cache_dir' )
def UpperCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
return datasets.DatasetInfo(features=self.config.features )
def UpperCamelCase_ ( self : Tuple , UpperCamelCase : datasets.download.download_manager.DownloadManager ):
'''simple docstring'''
return [datasets.SplitGenerator(name=datasets.Split.TRAIN )]
def UpperCamelCase_ ( self : Optional[Any] , UpperCamelCase : Any ):
'''simple docstring'''
import pyspark
def get_arrow_batch_size(UpperCamelCase : Optional[int] ):
for batch in it:
yield pa.RecordBatch.from_pydict({'batch_bytes': [batch.nbytes]} )
_snake_case : Union[str, Any] = self.df.count()
_snake_case : Tuple = df_num_rows if df_num_rows <= 1_00 else 1_00
# Approximate the size of each row (in Arrow format) by averaging over a max-100-row sample.
_snake_case : int = (
self.df.limit(UpperCamelCase )
.repartition(1 )
.mapInArrow(UpperCamelCase , 'batch_bytes: long' )
.agg(pyspark.sql.functions.sum('batch_bytes' ).alias('sample_bytes' ) )
.collect()[0]
.sample_bytes
/ sample_num_rows
)
_snake_case : Optional[int] = approx_bytes_per_row * df_num_rows
if approx_total_size > max_shard_size:
# Make sure there is at least one row per partition.
_snake_case : Dict = min(UpperCamelCase , int(approx_total_size / max_shard_size ) )
_snake_case : Optional[int] = self.df.repartition(UpperCamelCase )
def UpperCamelCase_ ( self : Any , UpperCamelCase : str , UpperCamelCase : str , UpperCamelCase : int , ):
'''simple docstring'''
import pyspark
_snake_case : str = ParquetWriter if file_format == 'parquet' else ArrowWriter
_snake_case : Optional[Any] = os.path.join(self._working_dir , os.path.basename(UpperCamelCase ) ) if self._working_dir else fpath
_snake_case : List[str] = file_format == 'parquet'
# Define these so that we don't reference self in write_arrow, which will result in a pickling error due to
# pickling the SparkContext.
_snake_case : List[Any] = self.config.features
_snake_case : Tuple = self._writer_batch_size
_snake_case : str = self._fs.storage_options
def write_arrow(UpperCamelCase : int ):
# Within the same SparkContext, no two task attempts will share the same attempt ID.
_snake_case : str = pyspark.TaskContext().taskAttemptId()
_snake_case : List[Any] = next(UpperCamelCase , UpperCamelCase )
if first_batch is None:
# Some partitions might not receive any data.
return pa.RecordBatch.from_arrays(
[[task_id], [0], [0]] , names=['task_id', 'num_examples', 'num_bytes'] , )
_snake_case : int = 0
_snake_case : List[Any] = writer_class(
features=UpperCamelCase , path=working_fpath.replace('SSSSS' , f"""{shard_id:05d}""" ).replace('TTTTT' , f"""{task_id:05d}""" ) , writer_batch_size=UpperCamelCase , storage_options=UpperCamelCase , embed_local_files=UpperCamelCase , )
_snake_case : Any = pa.Table.from_batches([first_batch] )
writer.write_table(UpperCamelCase )
for batch in it:
if max_shard_size is not None and writer._num_bytes >= max_shard_size:
_snake_case , _snake_case : str = writer.finalize()
writer.close()
yield pa.RecordBatch.from_arrays(
[[task_id], [num_examples], [num_bytes]] , names=['task_id', 'num_examples', 'num_bytes'] , )
shard_id += 1
_snake_case : Tuple = writer_class(
features=writer._features , path=working_fpath.replace('SSSSS' , f"""{shard_id:05d}""" ).replace('TTTTT' , f"""{task_id:05d}""" ) , writer_batch_size=UpperCamelCase , storage_options=UpperCamelCase , embed_local_files=UpperCamelCase , )
_snake_case : Optional[Any] = pa.Table.from_batches([batch] )
writer.write_table(UpperCamelCase )
if writer._num_bytes > 0:
_snake_case , _snake_case : Optional[int] = writer.finalize()
writer.close()
yield pa.RecordBatch.from_arrays(
[[task_id], [num_examples], [num_bytes]] , names=['task_id', 'num_examples', 'num_bytes'] , )
if working_fpath != fpath:
for file in os.listdir(os.path.dirname(UpperCamelCase ) ):
_snake_case : List[Any] = os.path.join(os.path.dirname(UpperCamelCase ) , os.path.basename(UpperCamelCase ) )
shutil.move(UpperCamelCase , UpperCamelCase )
_snake_case : int = (
self.df.mapInArrow(UpperCamelCase , 'task_id: long, num_examples: long, num_bytes: long' )
.groupBy('task_id' )
.agg(
pyspark.sql.functions.sum('num_examples' ).alias('total_num_examples' ) , pyspark.sql.functions.sum('num_bytes' ).alias('total_num_bytes' ) , pyspark.sql.functions.count('num_bytes' ).alias('num_shards' ) , pyspark.sql.functions.collect_list('num_examples' ).alias('shard_lengths' ) , )
.collect()
)
for row in stats:
yield row.task_id, (row.total_num_examples, row.total_num_bytes, row.num_shards, row.shard_lengths)
def UpperCamelCase_ ( self : Tuple , UpperCamelCase : "datasets.SplitGenerator" , UpperCamelCase : str = "arrow" , UpperCamelCase : Optional[Union[str, int]] = None , UpperCamelCase : Optional[int] = None , **UpperCamelCase : str , ):
'''simple docstring'''
self._validate_cache_dir()
_snake_case : Optional[int] = convert_file_size_to_int(max_shard_size or MAX_SHARD_SIZE )
self._repartition_df_if_needed(UpperCamelCase )
_snake_case : List[Any] = not is_remote_filesystem(self._fs )
_snake_case : str = os.path.join if is_local else posixpath.join
_snake_case : Dict = '-TTTTT-SSSSS-of-NNNNN'
_snake_case : int = f"""{self.name}-{split_generator.name}{SUFFIX}.{file_format}"""
_snake_case : int = path_join(self._output_dir , UpperCamelCase )
_snake_case : int = 0
_snake_case : List[Any] = 0
_snake_case : List[str] = 0
_snake_case : List[str] = []
_snake_case : Optional[int] = []
for task_id, content in self._prepare_split_single(UpperCamelCase , UpperCamelCase , UpperCamelCase ):
_snake_case , _snake_case , _snake_case , _snake_case : int = content
if num_bytes > 0:
total_num_examples += num_examples
total_num_bytes += num_bytes
total_shards += num_shards
task_id_and_num_shards.append((task_id, num_shards) )
all_shard_lengths.extend(UpperCamelCase )
_snake_case : Tuple = total_num_examples
_snake_case : str = total_num_bytes
# should rename everything at the end
logger.debug(f"""Renaming {total_shards} shards.""" )
if total_shards > 1:
_snake_case : Dict = all_shard_lengths
# Define fs outside of _rename_shard so that we don't reference self in the function, which will result in a
# pickling error due to pickling the SparkContext.
_snake_case : str = self._fs
# use the -SSSSS-of-NNNNN pattern
def _rename_shard(
UpperCamelCase : int , UpperCamelCase : int , UpperCamelCase : int , ):
rename(
UpperCamelCase , fpath.replace('SSSSS' , f"""{shard_id:05d}""" ).replace('TTTTT' , f"""{task_id:05d}""" ) , fpath.replace('TTTTT-SSSSS' , f"""{global_shard_id:05d}""" ).replace('NNNNN' , f"""{total_shards:05d}""" ) , )
_snake_case : Tuple = []
_snake_case : Optional[Any] = 0
for i in range(len(UpperCamelCase ) ):
_snake_case , _snake_case : Any = task_id_and_num_shards[i]
for shard_id in range(UpperCamelCase ):
args.append([task_id, shard_id, global_shard_id] )
global_shard_id += 1
self._spark.sparkContext.parallelize(UpperCamelCase , len(UpperCamelCase ) ).map(lambda UpperCamelCase : _rename_shard(*UpperCamelCase ) ).collect()
else:
# don't use any pattern
_snake_case : Union[str, Any] = 0
_snake_case : Tuple = task_id_and_num_shards[0][0]
self._rename(
fpath.replace('SSSSS' , f"""{shard_id:05d}""" ).replace('TTTTT' , f"""{task_id:05d}""" ) , fpath.replace(UpperCamelCase , '' ) , )
def UpperCamelCase_ ( self : Optional[int] , UpperCamelCase : "datasets.SplitGenerator" , ):
'''simple docstring'''
return SparkExamplesIterable(self.df )
| 669 |
from __future__ import annotations
from random import random
class _lowerCAmelCase :
'''simple docstring'''
def __init__( self : Dict , UpperCamelCase : int | None = None ):
'''simple docstring'''
_snake_case : str = value
_snake_case : List[Any] = random()
_snake_case : Node | None = None
_snake_case : Node | None = None
def __repr__( self : Optional[Any] ):
'''simple docstring'''
from pprint import pformat
if self.left is None and self.right is None:
return f"""'{self.value}: {self.prior:.5}'"""
else:
return pformat(
{f"""{self.value}: {self.prior:.5}""": (self.left, self.right)} , indent=1 )
def __str__( self : Dict ):
'''simple docstring'''
_snake_case : List[str] = str(self.value ) + ' '
_snake_case : List[Any] = str(self.left or '' )
_snake_case : int = str(self.right or '' )
return value + left + right
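# Split the treap by value: keys smaller than value end up in the left result, the rest in the right.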
def lowerCamelCase_ ( lowerCAmelCase: Node | None , lowerCAmelCase: int )-> tuple[Node | None, Node | None]:
if root is None: # None tree is split into 2 Nones
return None, None
elif root.value is None:
return None, None
else:
if value < root.value:
_snake_case , _snake_case : Optional[Any] = split(root.left , lowerCAmelCase )
return left, root
else:
_snake_case , _snake_case : List[str] = split(root.right , lowerCAmelCase )
return root, right
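# Merge two treaps, assuming every key in the left treap is smaller than every key in the right one.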
def lowerCamelCase_ ( lowerCAmelCase: Node | None , lowerCAmelCase: Node | None )-> Node | None:
if (not left) or (not right): # If one node is None, return the other
return left or right
elif left.prior < right.prior:
_snake_case : str = merge(left.right , lowerCAmelCase )
return left
else:
_snake_case : Union[str, Any] = merge(lowerCAmelCase , right.left )
return right
def lowerCamelCase_ ( lowerCAmelCase: Node | None , lowerCAmelCase: int )-> Node | None:
_snake_case : Tuple = Node(lowerCAmelCase )
_snake_case , _snake_case : Optional[int] = split(lowerCAmelCase , lowerCAmelCase )
return merge(merge(lowerCAmelCase , lowerCAmelCase ) , lowerCAmelCase )
def lowerCamelCase_ ( lowerCAmelCase: Node | None , lowerCAmelCase: int )-> Node | None:
_snake_case , _snake_case : Optional[int] = split(lowerCAmelCase , value - 1 )
_snake_case , _snake_case : List[str] = split(lowerCAmelCase , lowerCAmelCase )
return merge(lowerCAmelCase , lowerCAmelCase )
def lowerCamelCase_ ( lowerCAmelCase: Node | None )-> None:
if not root: # None
return
else:
inorder(root.left )
print(root.value , end=',' )
inorder(root.right )
def lowerCamelCase_ ( lowerCAmelCase: Node | None , lowerCAmelCase: str )-> Node | None:
for arg in args.split():
if arg[0] == "+":
_snake_case : List[str] = insert(lowerCAmelCase , int(arg[1:] ) )
elif arg[0] == "-":
_snake_case : Any = erase(lowerCAmelCase , int(arg[1:] ) )
else:
print('Unknown command' )
return root
def lowerCamelCase_ ( )-> None:
_snake_case : Tuple = None
print(
'enter numbers to create a tree, + value to add value into treap, '
'- value to erase all nodes with value. \'q\' to quit. ' )
_snake_case : List[Any] = input()
while args != "q":
_snake_case : int = interact_treap(lowerCAmelCase , lowerCAmelCase )
print(lowerCAmelCase )
_snake_case : Tuple = input()
print('good by!' )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
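# Usage sketch (editor's illustration, not part of the original module): build a
# treap from a few values with the helpers above and print them in sorted order.
#
#     root = None
#     for x in (5, 3, 8, 1):
#         root = insert(root, x)
#     inorder(root)   # prints: 1,3,5,8,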
| 669 | 1 |
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import PIL
from ...utils import BaseOutput, OptionalDependencyNotAvailable, is_torch_available, is_transformers_available
from .timesteps import (
    fastaa_timesteps,
    smartaa_timesteps,
    smartaaa_timesteps,
    superaa_timesteps,
    superaaa_timesteps,
)
@dataclass
class IFPipelineOutput(BaseOutput):
    """
    Output class for DeepFloyd IF pipelines: generated images plus optional
    per-image NSFW and watermark flags.
    """

    images: Union[List[PIL.Image.Image], np.ndarray]
    nsfw_detected: Optional[List[bool]]
    watermark_detected: Optional[List[bool]]
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipeline_if import IFPipeline
from .pipeline_if_imgaimg import IFImgaImgPipeline
from .pipeline_if_imgaimg_superresolution import IFImgaImgSuperResolutionPipeline
from .pipeline_if_inpainting import IFInpaintingPipeline
from .pipeline_if_inpainting_superresolution import IFInpaintingSuperResolutionPipeline
from .pipeline_if_superresolution import IFSuperResolutionPipeline
from .safety_checker import IFSafetyChecker
from .watermark import IFWatermarker
| 669 |
from functools import reduce
N = (
"""73167176531330624919225119674426574742355349194934"""
"""96983520312774506326239578318016984801869478851843"""
"""85861560789112949495459501737958331952853208805511"""
"""12540698747158523863050715693290963295227443043557"""
"""66896648950445244523161731856403098711121722383113"""
"""62229893423380308135336276614282806444486645238749"""
"""30358907296290491560440772390713810515859307960866"""
"""70172427121883998797908792274921901699720888093776"""
"""65727333001053367881220235421809751254540594752243"""
"""52584907711670556013604839586446706324415722155397"""
"""53697817977846174064955149290862569321978468622482"""
"""83972241375657056057490261407972968652414535100474"""
"""82166370484403199890008895243450658541227588666881"""
"""16427171479924442928230863465674813919123162824586"""
"""17866458359124566529476545682848912883142607690042"""
"""24219022671055626321111109370544217506941658960408"""
"""07198403850962455444362981230987879927244284909188"""
"""84580156166097919133875499200524063689912560717606"""
"""05886116467109405077541002256983155200055935729725"""
"""71636269561882670428252483600823257530420752963450"""
)
def solution(n: str = N) -> int:
    """Find the greatest product of thirteen consecutive digits in the string `n`."""
    return max(
        # mypy cannot properly interpret reduce
        int(reduce(lambda x, y: str(int(x) * int(y)), n[i : i + 13]))
        for i in range(len(n) - 12)
    )
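# Worked example (editor's illustration): on the 4-digit window "9989" the
# running reduce computes str(9 * 9) = "81", then str(81 * 8) = "648", then
# str(648 * 9) = "5832", so the outer int() recovers the digit product 5832.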
if __name__ == "__main__":
print(F"""{solution() = }""")
| 669 | 1 |
from itertools import product
def total_frequency_distribution(sides_number: int, dice_number: int) -> list[int]:
    """Count, for every possible total, how many dice rolls produce it."""
    max_face_number = sides_number
    max_total = max_face_number * dice_number
    totals_frequencies = [0] * (max_total + 1)

    min_face_number = 1
    faces_numbers = range(min_face_number, max_face_number + 1)
    for dice_numbers in product(faces_numbers, repeat=dice_number):
        total = sum(dice_numbers)
        totals_frequencies[total] += 1

    return totals_frequencies


def solution() -> float:
    """
    Probability that Peter (nine 4-sided dice) beats Colin (six 6-sided dice),
    rounded to seven decimal places.
    """
    peter_totals_frequencies = total_frequency_distribution(sides_number=4, dice_number=9)
    colin_totals_frequencies = total_frequency_distribution(sides_number=6, dice_number=6)

    peter_wins_count = 0
    min_peter_total = 9
    max_peter_total = 4 * 9
    min_colin_total = 6
    for peter_total in range(min_peter_total, max_peter_total + 1):
        peter_wins_count += peter_totals_frequencies[peter_total] * sum(
            colin_totals_frequencies[min_colin_total:peter_total]
        )

    total_games_number = (4**9) * (6**6)
    peter_win_probability = peter_wins_count / total_games_number
    rounded_peter_win_probability = round(peter_win_probability, ndigits=7)

    return rounded_peter_win_probability
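# Sanity check (editor's illustration): with a single 6-sided die every total
# from 1 to 6 occurs exactly once, so
# total_frequency_distribution(sides_number=6, dice_number=1)
# returns [0, 1, 1, 1, 1, 1, 1] (index 0 is the impossible total 0).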
if __name__ == "__main__":
print(F"""{solution() = }""")
| 669 |
from unittest import TestCase
from datasets import Dataset
from minhash_deduplication import deduplicate_dataset, make_duplicate_clusters
def get_dataset() -> Dataset:
    data_dict = {
        "repo_name": ["test_repo1", "test_repo2", "test_repo3"],
        "path": ["test_1.py", "test_2.py", "unit_test.py"],
        "content": ["a " * 20, "a " * 30, "b " * 7],
    }
    dataset = Dataset.from_dict(data_dict)
    return dataset


class MakeDuplicateClustersTest(TestCase):
    def test_make_duplicate_clusters(self):
        ds = get_dataset()
        duplicate_clusters = make_duplicate_clusters(ds, 0.85)
        self.assertEqual(len(duplicate_clusters[0]), 2)

    def test_deduplicate_dataset(self):
        ds = get_dataset()
        ds_filter, duplicate_clusters = deduplicate_dataset(ds)
        self.assertEqual(len(ds_filter), 2)
        print(ds_filter)
        self.assertEqual(duplicate_clusters[0][0]["copies"], 2)
        self.assertEqual(duplicate_clusters[0][0]["is_extreme"], True)
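# Shape note (editor's illustration; exact key names beyond "copies" and
# "is_extreme" are assumptions): each cluster returned by make_duplicate_clusters
# is a list of dicts describing near-duplicate files, and deduplicate_dataset
# additionally annotates them with "copies" and "is_extreme".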
| 669 | 1 |
import json
import os
import unittest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_ftfy, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class CLIPTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = CLIPTokenizer
    rust_tokenizer_class = CLIPTokenizerFast
    test_rust_tokenizer = True
    from_pretrained_kwargs = {}
    test_seq2seq = False

    def setUp(self):
        super().setUp()

        # fmt: off
        vocab = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "l o", "lo w</w>", "e r</w>"]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return CLIPTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return CLIPTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = CLIPTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "lower newer"
        bpe_tokens = ["lo", "w", "er</w>", "n", "e", "w", "er</w>"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [10, 2, 16, 9, 3, 2, 16, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
    @require_ftfy
    def test_check_encoding_slow_fast(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_s = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                text = "A\n'll 11p223RF☆ho!!to?'d'd''d of a cat to-$''d."
                text_tokenized_s = tokenizer_s.tokenize(text)
                text_tokenized_r = tokenizer_r.tokenize(text)
                self.assertListEqual(text_tokenized_s, text_tokenized_r)

                # Test that the tokenization is identical on an example containing a character (Latin Small Letter A
                # with Tilde) encoded in 2 different ways
                text = "xa\u0303y" + " " + "x\xe3y"
                text_tokenized_s = tokenizer_s.tokenize(text)
                text_tokenized_r = tokenizer_r.tokenize(text)
                self.assertListEqual(text_tokenized_s, text_tokenized_r)

                # Test that the tokenization is identical on unicode of space type
                spaces_unicodes = [
                    "\u0009",  # (horizontal tab, '\t')
                    "\u000B",  # (vertical tab)
                    "\u000C",  # (form feed)
                    "\u0020",  # (space, ' ')
                    "\u200E",  # (left-to-right mark):w
                    "\u200F",  # (right-to-left mark)
                ]
                for unicode_seq in spaces_unicodes:
                    text_tokenized_s = tokenizer_s.tokenize(unicode_seq)
                    text_tokenized_r = tokenizer_r.tokenize(unicode_seq)
                    self.assertListEqual(text_tokenized_s, text_tokenized_r)

                # Test that the tokenization is identical on unicode of line break type
                line_break_unicodes = [
                    "\u000A",  # (line feed, '\n')
                    "\r\n",  # (carriage return and line feed, '\r\n')
                    "\u000D",  # (carriage return, '\r')
                    "\r",  # (carriage return, '\r')
                    "\u000D",  # (carriage return, '\r')
                    "\u2028",  # (line separator)
                    "\u2029",  # (paragraph separator)
                    # "\u0085", # (next line)
                ]
                # The tokenization is not identical for the character "\u0085" (next line). The slow version using ftfy transforms
                # it into the Horizontal Ellipsis character "…" ("\u2026") while the fast version transforms it into a
                # space (and thus into an empty list).
                for unicode_seq in line_break_unicodes:
                    text_tokenized_s = tokenizer_s.tokenize(unicode_seq)
                    text_tokenized_r = tokenizer_r.tokenize(unicode_seq)
                    self.assertListEqual(text_tokenized_s, text_tokenized_r)
    def test_offsets_mapping(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                text_of_1_token = "hello"  # `hello` is a token in the vocabulary of `pretrained_name`
                text = f"{text_of_1_token} {text_of_1_token}"

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name,
                    use_fast=True,
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (len(text_of_1_token) + 1, len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                text = f" {text}"
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name,
                    use_fast=True,
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                )
    def test_log_warning(self):
        # Loading an old serialization of the CLIP tokenizer must raise a clear error.
        with self.assertRaises(ValueError) as context:
            self.rust_tokenizer_class.from_pretrained("robot-test/old-clip-tokenizer")

        self.assertTrue(
            context.exception.args[0].startswith(
                "The `backend_tokenizer` provided does not match the expected format."
            )
        )

    @require_ftfy
    def test_tokenization_python_rust_equals(self):
        super().test_tokenization_python_rust_equals()

    def test_added_tokens_do_lower_case(self):
        # CLIP always lower cases letters, so the common test does not apply.
        pass
| 669 |
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class AltCLIPProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "CLIPImageProcessor"
    tokenizer_class = ("XLMRobertaTokenizer", "XLMRobertaTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)

    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
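# Usage sketch (editor's illustration; the checkpoint name is an assumption):
#
#     processor = AltCLIPProcessor.from_pretrained("BAAI/AltCLIP")
#     inputs = processor(text=["a photo of a cat"], images=image, return_tensors="pt")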
| 669 | 1 |
import unittest
from transformers import MobileBertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
MobileBertModel,
)
class _lowerCAmelCase :
'''simple docstring'''
def __init__( self : Dict , UpperCamelCase : List[Any] , UpperCamelCase : List[Any]=13 , UpperCamelCase : Tuple=7 , UpperCamelCase : str=True , UpperCamelCase : Optional[int]=True , UpperCamelCase : Optional[Any]=True , UpperCamelCase : List[str]=True , UpperCamelCase : Union[str, Any]=99 , UpperCamelCase : Optional[Any]=64 , UpperCamelCase : int=32 , UpperCamelCase : int=5 , UpperCamelCase : str=4 , UpperCamelCase : str=37 , UpperCamelCase : Tuple="gelu" , UpperCamelCase : List[Any]=0.1 , UpperCamelCase : Any=0.1 , UpperCamelCase : Optional[Any]=5_12 , UpperCamelCase : str=16 , UpperCamelCase : Optional[int]=2 , UpperCamelCase : str=0.02 , UpperCamelCase : Dict=3 , UpperCamelCase : Dict=4 , UpperCamelCase : Any=None , ):
'''simple docstring'''
_snake_case : int = parent
_snake_case : int = batch_size
_snake_case : Tuple = seq_length
_snake_case : int = is_training
_snake_case : Dict = use_input_mask
_snake_case : Any = use_token_type_ids
_snake_case : int = use_labels
_snake_case : str = vocab_size
_snake_case : int = hidden_size
_snake_case : List[str] = embedding_size
_snake_case : int = num_hidden_layers
_snake_case : Union[str, Any] = num_attention_heads
_snake_case : Optional[Any] = intermediate_size
_snake_case : Tuple = hidden_act
_snake_case : str = hidden_dropout_prob
_snake_case : Union[str, Any] = attention_probs_dropout_prob
_snake_case : str = max_position_embeddings
_snake_case : Optional[int] = type_vocab_size
_snake_case : List[str] = type_sequence_label_size
_snake_case : Optional[Any] = initializer_range
_snake_case : List[str] = num_labels
_snake_case : Tuple = num_choices
_snake_case : Union[str, Any] = scope
def UpperCamelCase_ ( self : List[Any] ):
'''simple docstring'''
_snake_case : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_snake_case : List[str] = None
if self.use_input_mask:
_snake_case : Any = random_attention_mask([self.batch_size, self.seq_length] )
_snake_case : Union[str, Any] = None
if self.use_token_type_ids:
_snake_case : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
_snake_case : int = None
_snake_case : Any = None
_snake_case : List[str] = None
if self.use_labels:
_snake_case : int = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_snake_case : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_snake_case : Dict = ids_tensor([self.batch_size] , self.num_choices )
_snake_case : Union[str, Any] = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def UpperCamelCase_ ( self : str ):
'''simple docstring'''
return MobileBertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , embedding_size=self.embedding_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=UpperCamelCase , initializer_range=self.initializer_range , )
def UpperCamelCase_ ( self : Dict , UpperCamelCase : Any , UpperCamelCase : Union[str, Any] , UpperCamelCase : Optional[int] , UpperCamelCase : int , UpperCamelCase : Optional[int] , UpperCamelCase : Tuple , UpperCamelCase : str ):
'''simple docstring'''
_snake_case : List[Any] = MobileBertModel(config=UpperCamelCase )
model.to(UpperCamelCase )
model.eval()
_snake_case : Union[str, Any] = model(UpperCamelCase , attention_mask=UpperCamelCase , token_type_ids=UpperCamelCase )
_snake_case : Optional[Any] = model(UpperCamelCase , token_type_ids=UpperCamelCase )
_snake_case : Optional[int] = model(UpperCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def UpperCamelCase_ ( self : Optional[int] , UpperCamelCase : Optional[Any] , UpperCamelCase : Any , UpperCamelCase : Any , UpperCamelCase : Optional[Any] , UpperCamelCase : int , UpperCamelCase : Any , UpperCamelCase : List[Any] ):
'''simple docstring'''
_snake_case : List[str] = MobileBertForMaskedLM(config=UpperCamelCase )
model.to(UpperCamelCase )
model.eval()
_snake_case : Optional[int] = model(UpperCamelCase , attention_mask=UpperCamelCase , token_type_ids=UpperCamelCase , labels=UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def UpperCamelCase_ ( self : Any , UpperCamelCase : str , UpperCamelCase : Any , UpperCamelCase : Optional[Any] , UpperCamelCase : Optional[Any] , UpperCamelCase : Optional[Any] , UpperCamelCase : str , UpperCamelCase : int ):
'''simple docstring'''
_snake_case : str = MobileBertForNextSentencePrediction(config=UpperCamelCase )
model.to(UpperCamelCase )
model.eval()
_snake_case : str = model(
UpperCamelCase , attention_mask=UpperCamelCase , token_type_ids=UpperCamelCase , labels=UpperCamelCase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) )
def UpperCamelCase_ ( self : List[Any] , UpperCamelCase : Optional[Any] , UpperCamelCase : Optional[int] , UpperCamelCase : Dict , UpperCamelCase : str , UpperCamelCase : int , UpperCamelCase : int , UpperCamelCase : Union[str, Any] ):
'''simple docstring'''
_snake_case : List[Any] = MobileBertForPreTraining(config=UpperCamelCase )
model.to(UpperCamelCase )
model.eval()
_snake_case : Any = model(
UpperCamelCase , attention_mask=UpperCamelCase , token_type_ids=UpperCamelCase , labels=UpperCamelCase , next_sentence_label=UpperCamelCase , )
self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) )
def UpperCamelCase_ ( self : Optional[int] , UpperCamelCase : List[Any] , UpperCamelCase : Any , UpperCamelCase : Any , UpperCamelCase : Union[str, Any] , UpperCamelCase : List[Any] , UpperCamelCase : Optional[int] , UpperCamelCase : str ):
'''simple docstring'''
_snake_case : List[str] = MobileBertForQuestionAnswering(config=UpperCamelCase )
model.to(UpperCamelCase )
model.eval()
_snake_case : Union[str, Any] = model(
UpperCamelCase , attention_mask=UpperCamelCase , token_type_ids=UpperCamelCase , start_positions=UpperCamelCase , end_positions=UpperCamelCase , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def UpperCamelCase_ ( self : Dict , UpperCamelCase : Optional[int] , UpperCamelCase : Union[str, Any] , UpperCamelCase : Any , UpperCamelCase : Any , UpperCamelCase : Dict , UpperCamelCase : Union[str, Any] , UpperCamelCase : List[Any] ):
'''simple docstring'''
_snake_case : Optional[int] = self.num_labels
_snake_case : Union[str, Any] = MobileBertForSequenceClassification(UpperCamelCase )
model.to(UpperCamelCase )
model.eval()
_snake_case : Any = model(UpperCamelCase , attention_mask=UpperCamelCase , token_type_ids=UpperCamelCase , labels=UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def UpperCamelCase_ ( self : str , UpperCamelCase : Optional[int] , UpperCamelCase : int , UpperCamelCase : Dict , UpperCamelCase : Dict , UpperCamelCase : Dict , UpperCamelCase : Tuple , UpperCamelCase : Optional[int] ):
'''simple docstring'''
_snake_case : int = self.num_labels
_snake_case : Any = MobileBertForTokenClassification(config=UpperCamelCase )
model.to(UpperCamelCase )
model.eval()
_snake_case : List[Any] = model(UpperCamelCase , attention_mask=UpperCamelCase , token_type_ids=UpperCamelCase , labels=UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def UpperCamelCase_ ( self : str , UpperCamelCase : Union[str, Any] , UpperCamelCase : Union[str, Any] , UpperCamelCase : str , UpperCamelCase : int , UpperCamelCase : Optional[int] , UpperCamelCase : Any , UpperCamelCase : Union[str, Any] ):
'''simple docstring'''
_snake_case : str = self.num_choices
_snake_case : Union[str, Any] = MobileBertForMultipleChoice(config=UpperCamelCase )
model.to(UpperCamelCase )
model.eval()
_snake_case : Optional[int] = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_snake_case : Dict = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_snake_case : List[Any] = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_snake_case : Optional[int] = model(
UpperCamelCase , attention_mask=UpperCamelCase , token_type_ids=UpperCamelCase , labels=UpperCamelCase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def UpperCamelCase_ ( self : int ):
'''simple docstring'''
_snake_case : Optional[int] = self.prepare_config_and_inputs()
(
(
_snake_case
) , (
_snake_case
) , (
_snake_case
) , (
_snake_case
) , (
_snake_case
) , (
_snake_case
) , (
_snake_case
) ,
) : Tuple = config_and_inputs
_snake_case : List[Any] = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class _lowerCAmelCase ( UpperCAmelCase_ , UpperCAmelCase_ , unittest.TestCase ):
'''simple docstring'''
a_ : int =(
(
MobileBertModel,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
)
if is_torch_available()
else ()
)
a_ : List[Any] =(
{
"""feature-extraction""": MobileBertModel,
"""fill-mask""": MobileBertForMaskedLM,
"""question-answering""": MobileBertForQuestionAnswering,
"""text-classification""": MobileBertForSequenceClassification,
"""token-classification""": MobileBertForTokenClassification,
"""zero-shot""": MobileBertForSequenceClassification,
}
if is_torch_available()
else {}
)
a_ : str =True
def UpperCamelCase_ ( self : Any , UpperCamelCase : List[Any] , UpperCamelCase : List[str] , UpperCamelCase : Optional[int]=False ):
'''simple docstring'''
_snake_case : Any = super()._prepare_for_class(UpperCamelCase , UpperCamelCase , return_labels=UpperCamelCase )
if return_labels:
if model_class in get_values(UpperCamelCase ):
_snake_case : int = torch.zeros(
(self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=UpperCamelCase )
_snake_case : Any = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=UpperCamelCase )
return inputs_dict
def UpperCamelCase_ ( self : List[str] ):
'''simple docstring'''
_snake_case : Optional[Any] = MobileBertModelTester(self )
_snake_case : int = ConfigTester(self , config_class=UpperCamelCase , hidden_size=37 )
def UpperCamelCase_ ( self : Dict ):
'''simple docstring'''
self.config_tester.run_common_tests()
def UpperCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
_snake_case : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_model(*UpperCamelCase )
def UpperCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
_snake_case : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_masked_lm(*UpperCamelCase )
def UpperCamelCase_ ( self : Dict ):
'''simple docstring'''
_snake_case : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_multiple_choice(*UpperCamelCase )
def UpperCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
_snake_case : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*UpperCamelCase )
def UpperCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
_snake_case : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_pretraining(*UpperCamelCase )
def UpperCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
_snake_case : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_question_answering(*UpperCamelCase )
def UpperCamelCase_ ( self : int ):
'''simple docstring'''
_snake_case : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_sequence_classification(*UpperCamelCase )
def UpperCamelCase_ ( self : Any ):
'''simple docstring'''
_snake_case : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_token_classification(*UpperCamelCase )
def _long_tensor(tok_lst):
    return torch.tensor(
        tok_lst,
        dtype=torch.long,
        device=torch_device,
    )
TOLERANCE = 1e-3
@require_torch
@require_sentencepiece
@require_tokenizers
class MobileBertModelIntegrationTests(unittest.TestCase):
    @slow
    def test_inference_no_head(self):
        model = MobileBertModel.from_pretrained("google/mobilebert-uncased").to(torch_device)
        input_ids = _long_tensor([[101, 7110, 1005, 1056, 2023, 11333, 17413, 1029, 102]])
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size((1, 9, 512))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [
                [
                    [-2.4736526e07, 8.2691656e04, 1.6521838e05],
                    [-5.7541704e-01, 3.9056022e00, 4.4011507e00],
                    [2.6047359e00, 1.5677652e00, -1.7324188e-01],
                ]
            ],
            device=torch_device,
        )

        # MobileBERT results range from 10e0 to 10e8. Even a 0.0000001% difference with a value of 10e8 results in a
        # ~1 difference, it's therefore not a good idea to measure using addition.
        # Here, we instead divide the expected result with the result in order to obtain ~1. We then check that the
        # result is held between bounds: 1 - TOLERANCE < expected_result / result < 1 + TOLERANCE
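        # For instance (illustrative numbers): an expected value of 1.0e8 against a
        # computed 1.00005e8 gives a ratio of ~0.99995, well within the 1e-3
        # tolerance, even though the absolute difference is 5000.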
        lower_bound = torch.all((expected_slice / output[..., :3, :3]) >= 1 - TOLERANCE)
        upper_bound = torch.all((expected_slice / output[..., :3, :3]) <= 1 + TOLERANCE)

        self.assertTrue(lower_bound and upper_bound)
| 669 |
import json
import os
from pathlib import Path
import pytest
from datasets.download.download_config import DownloadConfig
from datasets.download.download_manager import DownloadManager
from datasets.utils.file_utils import hash_url_to_filename
lowerCAmelCase_ = """http://www.mocksite.com/file1.txt"""
lowerCAmelCase_ = """\"text\": [\"foo\", \"foo\"]"""
lowerCAmelCase_ = """6d8ce9aa78a471c7477201efbeabd3bb01ac2e7d100a6dc024ba1608361f90a8"""
class MockResponse:
    status_code = 200
    headers = {"Content-Length": "100"}
    cookies = {}

    def iter_content(self, **kwargs) -> list[bytes]:
        return [bytes(CONTENT, "utf-8")]


def mock_request(*args, **kwargs) -> MockResponse:
    return MockResponse()
@pytest.mark.parametrize("urls_type", [str, list, dict])
def test_download_manager_download(urls_type, tmp_path, monkeypatch):
    import requests

    monkeypatch.setattr(requests, "request", mock_request)

    url = URL
    if issubclass(urls_type, str):
        urls = url
    elif issubclass(urls_type, list):
        urls = [url]
    elif issubclass(urls_type, dict):
        urls = {"train": url}
    dataset_name = "dummy"
    cache_subdir = "downloads"
    cache_dir_root = tmp_path
    download_config = DownloadConfig(
        cache_dir=os.path.join(cache_dir_root, cache_subdir),
        use_etag=False,
    )
    dl_manager = DownloadManager(dataset_name=dataset_name, download_config=download_config)
    downloaded_paths = dl_manager.download(urls)
    input_urls = urls
    for downloaded_paths in [downloaded_paths]:
        if isinstance(urls, str):
            downloaded_paths = [downloaded_paths]
            input_urls = [urls]
        elif isinstance(urls, dict):
            assert "train" in downloaded_paths.keys()
            downloaded_paths = downloaded_paths.values()
            input_urls = urls.values()
        assert downloaded_paths
        for downloaded_path, input_url in zip(downloaded_paths, input_urls):
            assert downloaded_path == dl_manager.downloaded_paths[input_url]
            downloaded_path = Path(downloaded_path)
            parts = downloaded_path.parts
            assert parts[-1] == HASH
            assert parts[-2] == cache_subdir
            assert downloaded_path.exists()
            content = downloaded_path.read_text()
            assert content == CONTENT
            metadata_downloaded_path = downloaded_path.with_suffix(".json")
            assert metadata_downloaded_path.exists()
            metadata_content = json.loads(metadata_downloaded_path.read_text())
            assert metadata_content == {"url": URL, "etag": None}
@pytest.mark.parametrize("paths_type", [str, list, dict])
def test_download_manager_extract(paths_type, xz_file, text_file):
    filename = str(xz_file)
    if issubclass(paths_type, str):
        paths = filename
    elif issubclass(paths_type, list):
        paths = [filename]
    elif issubclass(paths_type, dict):
        paths = {"train": filename}
    dataset_name = "dummy"
    cache_dir = xz_file.parent
    extracted_subdir = "extracted"
    download_config = DownloadConfig(
        cache_dir=cache_dir,
        use_etag=False,
    )
    dl_manager = DownloadManager(dataset_name=dataset_name, download_config=download_config)
    extracted_paths = dl_manager.extract(paths)
    input_paths = paths
    for extracted_paths in [extracted_paths]:
        if isinstance(paths, str):
            extracted_paths = [extracted_paths]
            input_paths = [paths]
        elif isinstance(paths, dict):
            assert "train" in extracted_paths.keys()
            extracted_paths = extracted_paths.values()
            input_paths = paths.values()
        assert extracted_paths
        for extracted_path, input_path in zip(extracted_paths, input_paths):
            assert extracted_path == dl_manager.extracted_paths[input_path]
            extracted_path = Path(extracted_path)
            parts = extracted_path.parts
            assert parts[-1] == hash_url_to_filename(input_path, etag=None)
            assert parts[-2] == extracted_subdir
            assert extracted_path.exists()
            extracted_file_content = extracted_path.read_text()
            expected_file_content = text_file.read_text()
            assert extracted_file_content == expected_file_content
def _test_jsonl(path, file):
    assert path.endswith(".jsonl")
    for num_items, line in enumerate(file, start=1):
        item = json.loads(line.decode("utf-8"))
        assert item.keys() == {"col_1", "col_2", "col_3"}
    assert num_items == 4
@pytest.mark.parametrize("archive_jsonl", ["tar_jsonl_path", "zip_jsonl_path"])
def test_iter_archive_path(archive_jsonl, request):
    archive_jsonl_path = request.getfixturevalue(archive_jsonl)
    dl_manager = DownloadManager()
    for num_jsonl, (path, file) in enumerate(dl_manager.iter_archive(archive_jsonl_path), start=1):
        _test_jsonl(path, file)
    assert num_jsonl == 2
@pytest.mark.parametrize("archive_nested_jsonl", ["tar_nested_jsonl_path", "zip_nested_jsonl_path"])
def test_iter_archive_file(archive_nested_jsonl, request):
    archive_nested_jsonl_path = request.getfixturevalue(archive_nested_jsonl)
    dl_manager = DownloadManager()
    for num_tar, (path, file) in enumerate(dl_manager.iter_archive(archive_nested_jsonl_path), start=1):
        for num_jsonl, (subpath, subfile) in enumerate(dl_manager.iter_archive(file), start=1):
            _test_jsonl(subpath, subfile)
    assert num_tar == 1
    assert num_jsonl == 2
def test_iter_files(data_dir_with_hidden_files):
    dl_manager = DownloadManager()
    for num_file, file in enumerate(dl_manager.iter_files(data_dir_with_hidden_files), start=1):
        assert os.path.basename(file) == ("test.txt" if num_file == 1 else "train.txt")
    assert num_file == 2
| 669 | 1 |
import faiss # noqa: F401 # Here to have a nice missing dependency error message early on
import numpy # noqa: F401 # Here to have a nice missing dependency error message early on
import requests # noqa: F401 # Here to have a nice missing dependency error message early on
import sklearn # noqa: F401 # Here to have a nice missing dependency error message early on
import tqdm # noqa: F401 # Here to have a nice missing dependency error message early on
from mauve import compute_mauve # From: mauve-text
import datasets
lowerCAmelCase_ = """\
@inproceedings{pillutla-etal:mauve:neurips2021,
title={MAUVE: Measuring the Gap Between Neural Text and Human Text using Divergence Frontiers},
author={Pillutla, Krishna and Swayamdipta, Swabha and Zellers, Rowan and Thickstun, John and Welleck, Sean and Choi, Yejin and Harchaoui, Zaid},
booktitle = {NeurIPS},
year = {2021}
}
"""
lowerCAmelCase_ = """\
MAUVE is a library built on PyTorch and HuggingFace Transformers to measure the gap between neural text and human text with the eponymous MAUVE measure.
MAUVE summarizes both Type I and Type II errors measured softly using Kullback–Leibler (KL) divergences.
For details, see the MAUVE paper: https://arxiv.org/abs/2102.01454 (Neurips, 2021).
This metric is a wrapper around the official implementation of MAUVE:
https://github.com/krishnap25/mauve
"""
lowerCAmelCase_ = """
Calculates MAUVE scores between two lists of generated text and reference text.
Args:
predictions: list of generated text to score. Each predictions
should be a string with tokens separated by spaces.
references: list of reference for each prediction. Each
reference should be a string with tokens separated by spaces.
Optional Args:
num_buckets: the size of the histogram to quantize P and Q. Options: 'auto' (default) or an integer
pca_max_data: the number data points to use for PCA dimensionality reduction prior to clustering. If -1, use all the data. Default -1
kmeans_explained_var: amount of variance of the data to keep in dimensionality reduction by PCA. Default 0.9
kmeans_num_redo: number of times to redo k-means clustering (the best objective is kept). Default 5
kmeans_max_iter: maximum number of k-means iterations. Default 500
featurize_model_name: name of the model from which features are obtained. Default 'gpt2-large' Use one of ['gpt2', 'gpt2-medium', 'gpt2-large', 'gpt2-xl'].
device_id: Device for featurization. Supply a GPU id (e.g. 0 or 3) to use GPU. If no GPU with this id is found, use CPU
max_text_length: maximum number of tokens to consider. Default 1024
divergence_curve_discretization_size: Number of points to consider on the divergence curve. Default 25
mauve_scaling_factor: \"c\" from the paper. Default 5.
verbose: If True (default), print running time updates
seed: random seed to initialize k-means cluster assignments.
Returns:
mauve: MAUVE score, a number between 0 and 1. Larger values indicate that P and Q are closer,
frontier_integral: Frontier Integral, a number between 0 and 1. Smaller values indicate that P and Q are closer,
divergence_curve: a numpy.ndarray of shape (m, 2); plot it with matplotlib to view the divergence curve,
p_hist: a discrete distribution, which is a quantized version of the text distribution p_text,
q_hist: same as above, but with q_text.
Examples:
>>> # faiss segfaults in doctest for some reason, so the .compute call is not tested with doctest
>>> import datasets
>>> mauve = datasets.load_metric('mauve')
>>> predictions = [\"hello there\", \"general kenobi\"]
>>> references = [\"hello there\", \"general kenobi\"]
>>> out = mauve.compute(predictions=predictions, references=references) # doctest: +SKIP
>>> print(out.mauve) # doctest: +SKIP
1.0
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Mauve(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            homepage="https://github.com/krishnap25/mauve",
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            codebase_urls=["https://github.com/krishnap25/mauve"],
            reference_urls=[
                "https://arxiv.org/abs/2102.01454",
                "https://github.com/krishnap25/mauve",
            ],
        )

    def _compute(self, predictions, references, p_features=None, q_features=None, p_tokens=None, q_tokens=None, num_buckets="auto", pca_max_data=-1, kmeans_explained_var=0.9, kmeans_num_redo=5, kmeans_max_iter=500, featurize_model_name="gpt2-large", device_id=-1, max_text_length=1024, divergence_curve_discretization_size=25, mauve_scaling_factor=5, verbose=True, seed=25):
        out = compute_mauve(p_text=predictions, q_text=references, p_features=p_features, q_features=q_features, p_tokens=p_tokens, q_tokens=q_tokens, num_buckets=num_buckets, pca_max_data=pca_max_data, kmeans_explained_var=kmeans_explained_var, kmeans_num_redo=kmeans_num_redo, kmeans_max_iter=kmeans_max_iter, featurize_model_name=featurize_model_name, device_id=device_id, max_text_length=max_text_length, divergence_curve_discretization_size=divergence_curve_discretization_size, mauve_scaling_factor=mauve_scaling_factor, verbose=verbose, seed=seed)
        return out
| 669 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""roberta-base""": """https://huggingface.co/roberta-base/resolve/main/config.json""",
"""roberta-large""": """https://huggingface.co/roberta-large/resolve/main/config.json""",
"""roberta-large-mnli""": """https://huggingface.co/roberta-large-mnli/resolve/main/config.json""",
"""distilroberta-base""": """https://huggingface.co/distilroberta-base/resolve/main/config.json""",
"""roberta-base-openai-detector""": """https://huggingface.co/roberta-base-openai-detector/resolve/main/config.json""",
"""roberta-large-openai-detector""": """https://huggingface.co/roberta-large-openai-detector/resolve/main/config.json""",
}
class RobertaConfig(PretrainedConfig):
    model_type = "roberta"

    def __init__(self, vocab_size=50265, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=1, bos_token_id=0, eos_token_id=2, position_embedding_type="absolute", use_cache=True, classifier_dropout=None, **kwargs):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class RobertaOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
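# Usage sketch (editor's illustration): build a default configuration and read
# a couple of its fields.
#
#     config = RobertaConfig()
#     assert config.hidden_size == 768
#     assert config.num_attention_heads == 12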
| 669 | 1 |
def or_gate(input_1: int, input_2: int) -> int:
    """Logical OR for binary inputs: 1 if at least one input is 1, else 0."""
    return int((input_1, input_2).count(1) != 0)


def test_or_gate() -> None:
    assert or_gate(0, 0) == 0
    assert or_gate(0, 1) == 1
    assert or_gate(1, 0) == 1
    assert or_gate(1, 1) == 1
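# Equivalence note (editor's illustration): for binary inputs, or_gate(a, b) is
# the same as int(bool(a) or bool(b)), since (a, b).count(1) != 0 exactly when
# at least one of the inputs equals 1.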
if __name__ == "__main__":
print(or_gate(0, 1))
print(or_gate(1, 0))
print(or_gate(0, 0))
print(or_gate(1, 1))
| 669 |
from random import randint, random
def construct_highway(
    number_of_cells: int,
    frequency: int,
    initial_speed: int,
    random_frequency: bool = False,
    random_speed: bool = False,
    max_speed: int = 5,
) -> list:
    """Build the initial highway: one row of cells, -1 meaning an empty cell."""
    highway = [[-1] * number_of_cells]  # Create a highway without any car
    i = 0
    initial_speed = max(initial_speed, 0)
    while i < number_of_cells:
        highway[0][i] = (
            randint(0, max_speed) if random_speed else initial_speed
        )  # Place the cars
        i += (
            randint(1, max_speed * 2) if random_frequency else frequency
        )  # Arbitrary number, may need tuning
    return highway


def get_distance(highway_now: list, car_index: int) -> int:
    """Number of empty cells in front of the car at `car_index` (the road loops)."""
    distance = 0
    cells = highway_now[car_index + 1 :]
    for cell in range(len(cells)):  # May need a better name for this
        if cells[cell] != -1:  # If the cell is not empty then
            return distance  # we have the distance we wanted
        distance += 1
    # Here if the car is near the end of the highway
    return distance + get_distance(highway_now, -1)


def update(highway_now: list, probability: float, max_speed: int) -> list:
    """One Nagel-Schreckenberg step: accelerate, brake for the car ahead, then randomly slow down."""
    number_of_cells = len(highway_now)
    # Before calculations, the highway is empty
    next_highway = [-1] * number_of_cells

    for car_index in range(number_of_cells):
        if highway_now[car_index] != -1:
            # Add 1 to the current speed of the car and cap the speed
            next_highway[car_index] = min(highway_now[car_index] + 1, max_speed)
            # Number of empty cell before the next car
            dn = get_distance(highway_now, car_index) - 1
            # We can't have the car causing an accident
            next_highway[car_index] = min(next_highway[car_index], dn)
            if random() < probability:
                # Randomly, a driver will slow down
                next_highway[car_index] = max(next_highway[car_index] - 1, 0)
    return next_highway


def simulate(highway: list, number_of_update: int, probability: float, max_speed: int) -> list:
    """Run the update `number_of_update` times, moving cars after each speed update."""
    number_of_cells = len(highway[0])

    for i in range(number_of_update):
        next_speeds_calculated = update(highway[i], probability, max_speed)
        real_next_speeds = [-1] * number_of_cells

        for car_index in range(number_of_cells):
            speed = next_speeds_calculated[car_index]
            if speed != -1:
                # Change the position based on the speed (with % to create the loop)
                index = (car_index + speed) % number_of_cells
                # Commit the change of position
                real_next_speeds[index] = speed
        highway.append(real_next_speeds)
    return highway
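# Usage sketch (editor's illustration, not part of the original module): simulate
# a small circular highway for a few steps and inspect car speeds per cell.
#
#     highway = construct_highway(number_of_cells=20, frequency=4, initial_speed=1)
#     history = simulate(highway, number_of_update=5, probability=0.3, max_speed=5)
#     assert len(history) == 6  # initial state + 5 updates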
if __name__ == "__main__":
import doctest
doctest.testmod()
| 669 | 1 |
from copy import deepcopy
from typing import Optional, Union
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, is_tf_available, is_torch_available
if is_torch_available():
import torch
if is_tf_available():
import tensorflow as tf
class SamProcessor(ProcessorMixin):
    attributes = ["image_processor"]
    image_processor_class = "SamImageProcessor"

    def __init__(self, image_processor):
        super().__init__(image_processor)
        self.current_processor = self.image_processor
        self.point_pad_value = -10
        self.target_size = self.image_processor.size["longest_edge"]
    def __call__(self, images=None, input_points=None, input_labels=None, input_boxes=None, return_tensors: Optional[Union[str, TensorType]] = None, **kwargs) -> BatchEncoding:
        encoding_image_processor = self.image_processor(images, return_tensors=return_tensors, **kwargs)
        # pop arguments that are not used in the forward but used nevertheless
        original_sizes = encoding_image_processor["original_sizes"]
        if hasattr(original_sizes, "numpy"):  # Checks if Torch or TF tensor
            original_sizes = original_sizes.numpy()
        input_points, input_labels, input_boxes = self._check_and_preprocess_points(
            input_points=input_points, input_labels=input_labels, input_boxes=input_boxes,
        )
        encoding_image_processor = self._normalize_and_convert(
            encoding_image_processor, original_sizes, input_points=input_points, input_labels=input_labels, input_boxes=input_boxes, return_tensors=return_tensors,
        )
        return encoding_image_processor
    def _normalize_and_convert(self, encoding_image_processor, original_sizes, input_points=None, input_labels=None, input_boxes=None, return_tensors="pt"):
'''simple docstring'''
if input_points is not None:
if len(UpperCamelCase ) != len(UpperCamelCase ):
_snake_case : Dict = [
self._normalize_coordinates(self.target_size , UpperCamelCase , original_sizes[0] ) for point in input_points
]
else:
_snake_case : Dict = [
self._normalize_coordinates(self.target_size , UpperCamelCase , UpperCamelCase )
for point, original_size in zip(UpperCamelCase , UpperCamelCase )
]
# check that all arrays have the same shape
if not all(point.shape == input_points[0].shape for point in input_points ):
if input_labels is not None:
_snake_case , _snake_case : Optional[Any] = self._pad_points_and_labels(UpperCamelCase , UpperCamelCase )
_snake_case : Tuple = np.array(UpperCamelCase )
if input_labels is not None:
_snake_case : Optional[int] = np.array(UpperCamelCase )
if input_boxes is not None:
if len(UpperCamelCase ) != len(UpperCamelCase ):
_snake_case : Dict = [
self._normalize_coordinates(self.target_size , UpperCamelCase , original_sizes[0] , is_bounding_box=UpperCamelCase )
for box in input_boxes
]
else:
_snake_case : List[str] = [
self._normalize_coordinates(self.target_size , UpperCamelCase , UpperCamelCase , is_bounding_box=UpperCamelCase )
for box, original_size in zip(UpperCamelCase , UpperCamelCase )
]
_snake_case : str = np.array(UpperCamelCase )
if input_boxes is not None:
if return_tensors == "pt":
_snake_case : int = torch.from_numpy(UpperCamelCase )
# boxes batch size of 1 by default
_snake_case : Any = input_boxes.unsqueeze(1 ) if len(input_boxes.shape ) != 3 else input_boxes
elif return_tensors == "tf":
_snake_case : List[str] = tf.convert_to_tensor(UpperCamelCase )
# boxes batch size of 1 by default
_snake_case : Any = tf.expand_dims(UpperCamelCase , 1 ) if len(input_boxes.shape ) != 3 else input_boxes
encoding_image_processor.update({'input_boxes': input_boxes} )
if input_points is not None:
if return_tensors == "pt":
_snake_case : Optional[int] = torch.from_numpy(UpperCamelCase )
# point batch size of 1 by default
_snake_case : Dict = input_points.unsqueeze(1 ) if len(input_points.shape ) != 4 else input_points
elif return_tensors == "tf":
_snake_case : int = tf.convert_to_tensor(UpperCamelCase )
# point batch size of 1 by default
_snake_case : Optional[Any] = tf.expand_dims(UpperCamelCase , 1 ) if len(input_points.shape ) != 4 else input_points
encoding_image_processor.update({'input_points': input_points} )
if input_labels is not None:
if return_tensors == "pt":
_snake_case : Optional[Any] = torch.from_numpy(UpperCamelCase )
# point batch size of 1 by default
_snake_case : Optional[int] = input_labels.unsqueeze(1 ) if len(input_labels.shape ) != 3 else input_labels
elif return_tensors == "tf":
_snake_case : int = tf.convert_to_tensor(UpperCamelCase )
# point batch size of 1 by default
_snake_case : Tuple = tf.expand_dims(UpperCamelCase , 1 ) if len(input_labels.shape ) != 3 else input_labels
encoding_image_processor.update({'input_labels': input_labels} )
return encoding_image_processor
    def _pad_points_and_labels(self, input_points, input_labels):
'''simple docstring'''
_snake_case : Any = max([point.shape[0] for point in input_points] )
_snake_case : int = []
for i, point in enumerate(UpperCamelCase ):
if point.shape[0] != expected_nb_points:
_snake_case : List[str] = np.concatenate(
[point, np.zeros((expected_nb_points - point.shape[0], 2) ) + self.point_pad_value] , axis=0 )
_snake_case : Optional[Any] = np.append(input_labels[i] , [self.point_pad_value] )
processed_input_points.append(UpperCamelCase )
_snake_case : Union[str, Any] = processed_input_points
return input_points, input_labels
    def _normalize_coordinates(self, target_size: int, coords: np.ndarray, original_size, is_bounding_box=False):
'''simple docstring'''
_snake_case , _snake_case : Optional[int] = original_size
_snake_case , _snake_case : str = self.image_processor._get_preprocess_shape(UpperCamelCase , longest_edge=UpperCamelCase )
_snake_case : Optional[Any] = deepcopy(UpperCamelCase ).astype(UpperCamelCase )
if is_bounding_box:
_snake_case : str = coords.reshape(-1 , 2 , 2 )
_snake_case : Union[str, Any] = coords[..., 0] * (new_w / old_w)
_snake_case : Dict = coords[..., 1] * (new_h / old_h)
if is_bounding_box:
_snake_case : Any = coords.reshape(-1 , 4 )
return coords
    def _check_and_preprocess_points(self, input_points=None, input_labels=None, input_boxes=None):
'''simple docstring'''
if input_points is not None:
if hasattr(UpperCamelCase , 'numpy' ): # Checks for TF or Torch tensor
_snake_case : Union[str, Any] = input_points.numpy().tolist()
if not isinstance(UpperCamelCase , UpperCamelCase ) or not isinstance(input_points[0] , UpperCamelCase ):
raise ValueError('Input points must be a list of list of floating points.' )
_snake_case : Tuple = [np.array(UpperCamelCase ) for input_point in input_points]
else:
_snake_case : Optional[Any] = None
if input_labels is not None:
if hasattr(UpperCamelCase , 'numpy' ):
_snake_case : Dict = input_labels.numpy().tolist()
if not isinstance(UpperCamelCase , UpperCamelCase ) or not isinstance(input_labels[0] , UpperCamelCase ):
raise ValueError('Input labels must be a list of list integers.' )
_snake_case : Optional[Any] = [np.array(UpperCamelCase ) for label in input_labels]
else:
_snake_case : Optional[Any] = None
if input_boxes is not None:
if hasattr(UpperCamelCase , 'numpy' ):
_snake_case : Tuple = input_boxes.numpy().tolist()
if (
not isinstance(UpperCamelCase , UpperCamelCase )
or not isinstance(input_boxes[0] , UpperCamelCase )
or not isinstance(input_boxes[0][0] , UpperCamelCase )
):
raise ValueError('Input boxes must be a list of list of list of floating points.' )
_snake_case : Tuple = [np.array(UpperCamelCase ).astype(np.floataa ) for box in input_boxes]
else:
_snake_case : Any = None
return input_points, input_labels, input_boxes
    @property
    def model_input_names(self):
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(image_processor_input_names))

    def post_process_masks(self, *args, **kwargs):
        return self.image_processor.post_process_masks(*args, **kwargs)
| 669 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_realm import RealmTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""google/realm-cc-news-pretrained-embedder""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/vocab.txt"""
),
"""google/realm-cc-news-pretrained-encoder""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/vocab.txt"""
),
"""google/realm-cc-news-pretrained-scorer""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/vocab.txt"""
),
"""google/realm-cc-news-pretrained-openqa""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-openqa/aresolve/main/vocab.txt"""
),
"""google/realm-orqa-nq-openqa""": """https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/vocab.txt""",
"""google/realm-orqa-nq-reader""": """https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/vocab.txt""",
"""google/realm-orqa-wq-openqa""": """https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/vocab.txt""",
"""google/realm-orqa-wq-reader""": """https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/vocab.txt""",
},
"""tokenizer_file""": {
"""google/realm-cc-news-pretrained-embedder""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/tokenizer.jsont"""
),
"""google/realm-cc-news-pretrained-encoder""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/tokenizer.json"""
),
"""google/realm-cc-news-pretrained-scorer""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/tokenizer.json"""
),
"""google/realm-cc-news-pretrained-openqa""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-openqa/aresolve/main/tokenizer.json"""
),
"""google/realm-orqa-nq-openqa""": (
"""https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/tokenizer.json"""
),
"""google/realm-orqa-nq-reader""": (
"""https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/tokenizer.json"""
),
"""google/realm-orqa-wq-openqa""": (
"""https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/tokenizer.json"""
),
"""google/realm-orqa-wq-reader""": (
"""https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/tokenizer.json"""
),
},
}
lowerCAmelCase_ = {
"""google/realm-cc-news-pretrained-embedder""": 512,
"""google/realm-cc-news-pretrained-encoder""": 512,
"""google/realm-cc-news-pretrained-scorer""": 512,
"""google/realm-cc-news-pretrained-openqa""": 512,
"""google/realm-orqa-nq-openqa""": 512,
"""google/realm-orqa-nq-reader""": 512,
"""google/realm-orqa-wq-openqa""": 512,
"""google/realm-orqa-wq-reader""": 512,
}
lowerCAmelCase_ = {
"""google/realm-cc-news-pretrained-embedder""": {"""do_lower_case""": True},
"""google/realm-cc-news-pretrained-encoder""": {"""do_lower_case""": True},
"""google/realm-cc-news-pretrained-scorer""": {"""do_lower_case""": True},
"""google/realm-cc-news-pretrained-openqa""": {"""do_lower_case""": True},
"""google/realm-orqa-nq-openqa""": {"""do_lower_case""": True},
"""google/realm-orqa-nq-reader""": {"""do_lower_case""": True},
"""google/realm-orqa-wq-openqa""": {"""do_lower_case""": True},
"""google/realm-orqa-wq-reader""": {"""do_lower_case""": True},
}
class _lowerCAmelCase ( UpperCAmelCase_ ):
'''simple docstring'''
a_ : Union[str, Any] =VOCAB_FILES_NAMES
a_ : List[str] =PRETRAINED_VOCAB_FILES_MAP
a_ : str =PRETRAINED_INIT_CONFIGURATION
a_ : Optional[Any] =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a_ : List[Any] =RealmTokenizer
def __init__( self : List[str] , UpperCamelCase : Optional[int]=None , UpperCamelCase : List[Any]=None , UpperCamelCase : Optional[Any]=True , UpperCamelCase : Optional[Any]="[UNK]" , UpperCamelCase : Any="[SEP]" , UpperCamelCase : Optional[Any]="[PAD]" , UpperCamelCase : Optional[int]="[CLS]" , UpperCamelCase : Optional[Any]="[MASK]" , UpperCamelCase : Dict=True , UpperCamelCase : Optional[int]=None , **UpperCamelCase : Union[str, Any] , ):
'''simple docstring'''
super().__init__(
UpperCamelCase , tokenizer_file=UpperCamelCase , do_lower_case=UpperCamelCase , unk_token=UpperCamelCase , sep_token=UpperCamelCase , pad_token=UpperCamelCase , cls_token=UpperCamelCase , mask_token=UpperCamelCase , tokenize_chinese_chars=UpperCamelCase , strip_accents=UpperCamelCase , **UpperCamelCase , )
_snake_case : int = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get('lowercase' , UpperCamelCase ) != do_lower_case
or normalizer_state.get('strip_accents' , UpperCamelCase ) != strip_accents
or normalizer_state.get('handle_chinese_chars' , UpperCamelCase ) != tokenize_chinese_chars
):
_snake_case : int = getattr(UpperCamelCase , normalizer_state.pop('type' ) )
_snake_case : List[str] = do_lower_case
_snake_case : List[Any] = strip_accents
_snake_case : Dict = tokenize_chinese_chars
_snake_case : Any = normalizer_class(**UpperCamelCase )
_snake_case : Optional[int] = do_lower_case
def UpperCamelCase_ ( self : Dict , UpperCamelCase : Any , **UpperCamelCase : Union[str, Any] ):
'''simple docstring'''
_snake_case : Tuple = PaddingStrategy.MAX_LENGTH
_snake_case : Any = text
_snake_case : List[str] = kwargs.pop('text_pair' , UpperCamelCase )
_snake_case : int = kwargs.pop('return_tensors' , UpperCamelCase )
_snake_case : Optional[int] = {
'input_ids': [],
'attention_mask': [],
'token_type_ids': [],
}
for idx, candidate_text in enumerate(UpperCamelCase ):
if batch_text_pair is not None:
_snake_case : List[Any] = batch_text_pair[idx]
else:
_snake_case : Optional[Any] = None
_snake_case : Optional[int] = super().__call__(UpperCamelCase , UpperCamelCase , return_tensors=UpperCamelCase , **UpperCamelCase )
_snake_case : str = encoded_candidates.get('input_ids' )
_snake_case : Tuple = encoded_candidates.get('attention_mask' )
_snake_case : List[str] = encoded_candidates.get('token_type_ids' )
if encoded_input_ids is not None:
output_data["input_ids"].append(UpperCamelCase )
if encoded_attention_mask is not None:
output_data["attention_mask"].append(UpperCamelCase )
if encoded_token_type_ids is not None:
output_data["token_type_ids"].append(UpperCamelCase )
_snake_case : str = {key: item for key, item in output_data.items() if len(UpperCamelCase ) != 0}
return BatchEncoding(UpperCamelCase , tensor_type=UpperCamelCase )
def UpperCamelCase_ ( self : str , UpperCamelCase : Optional[Any] , UpperCamelCase : Union[str, Any]=None ):
'''simple docstring'''
_snake_case : Dict = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def UpperCamelCase_ ( self : Union[str, Any] , UpperCamelCase : List[int] , UpperCamelCase : Optional[List[int]] = None ):
'''simple docstring'''
_snake_case : int = [self.sep_token_id]
_snake_case : Dict = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
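# Illustrative note: a single sequence "[CLS] A [SEP]" gets token type 0 at
# every position, while for a pair "[CLS] A [SEP] B [SEP]" the second segment
# (B plus its closing [SEP]) gets token type 1, following the BERT convention.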
def UpperCamelCase_ ( self : Union[str, Any] , UpperCamelCase : str , UpperCamelCase : Optional[str] = None ):
'''simple docstring'''
_snake_case : Optional[Any] = self._tokenizer.model.save(UpperCamelCase , name=UpperCamelCase )
return tuple(UpperCamelCase )
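# Hedged usage sketch (the public names below are taken from the upstream
# REALM tokenizer and are assumptions here, since identifiers above are obfuscated):
# tokenizer = RealmTokenizerFast.from_pretrained("google/realm-cc-news-pretrained-encoder")
# batch = tokenizer.batch_encode_candidates(
#     [["candidate one", "candidate two"]], max_length=10, return_tensors="np"
# )
# # each field then has shape (num_examples, num_candidates, max_length)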
| 669 | 1 |
import argparse
import gc
import json
import os
import re
import torch
from huggingface_hub import hf_hub_download
from transformers import AutoModelForCausalLM, AutoTokenizer, PreTrainedTokenizerFast, RwkvConfig
from transformers.modeling_utils import WEIGHTS_INDEX_NAME, shard_checkpoint
lowerCAmelCase_ = {
"""169M""": 12,
"""430M""": 24,
"""1B5""": 24,
"""3B""": 32,
"""7B""": 32,
"""14B""": 40,
}
lowerCAmelCase_ = {
"""169M""": 768,
"""430M""": 1024,
"""1B5""": 2048,
"""3B""": 2560,
"""7B""": 4096,
"""14B""": 5120,
}
def lowerCamelCase_ ( lowerCAmelCase: Any )-> Tuple:
_snake_case : str = list(state_dict.keys() )
for name in state_dict_keys:
_snake_case : List[Any] = state_dict.pop(lowerCAmelCase )
# emb -> embedding
if name.startswith('emb.' ):
_snake_case : List[Any] = name.replace('emb.' , 'embeddings.' )
# ln_0 -> pre_ln (only present at block 0)
if name.startswith('blocks.0.ln0' ):
_snake_case : Union[str, Any] = name.replace('blocks.0.ln0' , 'blocks.0.pre_ln' )
# att -> attention
_snake_case : Dict = re.sub(R'blocks\.(\d+)\.att' , R'blocks.\1.attention' , lowerCAmelCase )
# ffn -> feed_forward
_snake_case : Dict = re.sub(R'blocks\.(\d+)\.ffn' , R'blocks.\1.feed_forward' , lowerCAmelCase )
# time_mix_k -> time_mix_key
if name.endswith('.time_mix_k' ):
_snake_case : Union[str, Any] = name.replace('.time_mix_k' , '.time_mix_key' )
# time_mix_v -> time_mix_value
if name.endswith('.time_mix_v' ):
_snake_case : Tuple = name.replace('.time_mix_v' , '.time_mix_value' )
# time_mix_r -> time_mix_receptance
if name.endswith('.time_mix_r' ):
_snake_case : Optional[Any] = name.replace('.time_mix_r' , '.time_mix_receptance' )
if name != "head.weight":
_snake_case : str = 'rwkv.' + name
_snake_case : Optional[int] = weight
return state_dict
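# Illustrative example of the renaming above: a checkpoint key such as
# "blocks.3.att.time_mix_k" becomes "rwkv.blocks.3.attention.time_mix_key",
# and "head.weight" is the only key left without the "rwkv." prefix.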
def lowerCamelCase_ ( lowerCAmelCase: Tuple , lowerCAmelCase: List[Any] , lowerCAmelCase: Any , lowerCAmelCase: Any=None , lowerCAmelCase: Union[str, Any]=None , lowerCAmelCase: Optional[Any]=False , lowerCAmelCase: Optional[Any]=None )-> Union[str, Any]:
# 1. If possible, build the tokenizer.
if tokenizer_file is None:
print('No `--tokenizer_file` provided, we will use the default tokenizer.' )
_snake_case : int = 5_02_77
_snake_case : int = AutoTokenizer.from_pretrained('EleutherAI/gpt-neox-20b' )
else:
_snake_case : List[Any] = PreTrainedTokenizerFast(tokenizer_file=lowerCAmelCase )
_snake_case : Tuple = len(lowerCAmelCase )
tokenizer.save_pretrained(lowerCAmelCase )
# 2. Build the config
_snake_case : Union[str, Any] = list(NUM_HIDDEN_LAYERS_MAPPING.keys() )
if size is None:
# Try to infer size from the checkpoint name
for candidate in possible_sizes:
if candidate in checkpoint_file:
_snake_case : Optional[Any] = candidate
break
if size is None:
raise ValueError('Could not infer the size, please provide it with the `--size` argument.' )
if size not in possible_sizes:
raise ValueError(F"""`size` should be one of {possible_sizes}, got {size}.""" )
_snake_case : Optional[Any] = RwkvConfig(
vocab_size=lowerCAmelCase , num_hidden_layers=NUM_HIDDEN_LAYERS_MAPPING[size] , hidden_size=HIDDEN_SIZE_MAPPING[size] , )
config.save_pretrained(lowerCAmelCase )
# 3. Download model file then convert state_dict
_snake_case : int = hf_hub_download(lowerCAmelCase , lowerCAmelCase )
_snake_case : Dict = torch.load(lowerCAmelCase , map_location='cpu' )
_snake_case : str = convert_state_dict(lowerCAmelCase )
# 4. Split in shards and save
_snake_case , _snake_case : Optional[Any] = shard_checkpoint(lowerCAmelCase )
for shard_file, shard in shards.items():
torch.save(lowerCAmelCase , os.path.join(lowerCAmelCase , lowerCAmelCase ) )
if index is not None:
_snake_case : int = os.path.join(lowerCAmelCase , lowerCAmelCase )
# Save the index as well
with open(lowerCAmelCase , 'w' , encoding='utf-8' ) as f:
_snake_case : Any = json.dumps(lowerCAmelCase , indent=2 , sort_keys=lowerCAmelCase ) + '\n'
f.write(lowerCAmelCase )
# 5. Clean up shards (for some reason the files PyTorch saves take the same space as the whole state_dict)
print(
'Cleaning up shards. This may fail with an OOM error; if this is the case, don\'t worry, you still have converted the model.' )
_snake_case : int = list(shards.keys() )
del state_dict
del shards
gc.collect()
for shard_file in shard_files:
_snake_case : Optional[Any] = torch.load(os.path.join(lowerCAmelCase , lowerCAmelCase ) )
torch.save({k: v.cpu().clone() for k, v in state_dict.items()} , os.path.join(lowerCAmelCase , lowerCAmelCase ) )
del state_dict
gc.collect()
if push_to_hub:
if model_name is None:
raise ValueError('Please provide a `model_name` to push the model to the Hub.' )
_snake_case : Optional[Any] = AutoModelForCausalLM.from_pretrained(lowerCAmelCase )
model.push_to_hub(lowerCAmelCase , max_shard_size='2GB' )
tokenizer.push_to_hub(lowerCAmelCase )
if __name__ == "__main__":
lowerCAmelCase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--repo_id""", default=None, type=str, required=True, help="""Repo ID from which to pull the checkpoint."""
)
parser.add_argument(
"""--checkpoint_file""", default=None, type=str, required=True, help="""Name of the checkpoint file in the repo."""
)
parser.add_argument(
"""--output_dir""", default=None, type=str, required=True, help="""Where to save the converted model."""
)
parser.add_argument(
"""--tokenizer_file""",
default=None,
type=str,
help="""Path to the tokenizer file to use (if not provided, only the model is converted).""",
)
parser.add_argument(
"""--size""",
default=None,
type=str,
help="""Size of the model. Will be inferred from the `checkpoint_file` if not passed.""",
)
parser.add_argument(
"""--push_to_hub""",
action="""store_true""",
help="""Push to the Hub the converted model.""",
)
parser.add_argument(
"""--model_name""",
default=None,
type=str,
help="""Name of the pushed model on the Hub, including the username / organization.""",
)
lowerCAmelCase_ = parser.parse_args()
convert_rwkv_checkpoint_to_hf_format(
args.repo_id,
args.checkpoint_file,
args.output_dir,
size=args.size,
tokenizer_file=args.tokenizer_file,
push_to_hub=args.push_to_hub,
model_name=args.model_name,
)
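# Example invocation (repo, file, and output names are illustrative only):
# python convert_rwkv_checkpoint_to_hf.py --repo_id BlinkDL/rwkv-4-pile-169m \
#     --checkpoint_file RWKV-4-Pile-169M-20220807-8023.pth \
#     --output_dir ./rwkv-169m --size 169M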
| 669 |
import argparse
import os
import transformers
from .convert_slow_tokenizer import SLOW_TO_FAST_CONVERTERS
from .utils import logging
logging.set_verbosity_info()
lowerCAmelCase_ = logging.get_logger(__name__)
lowerCAmelCase_ = {name: getattr(transformers, name + """Fast""") for name in SLOW_TO_FAST_CONVERTERS}
def lowerCamelCase_ ( lowerCAmelCase: List[Any] , lowerCAmelCase: Optional[Any] , lowerCAmelCase: Dict , lowerCAmelCase: Union[str, Any] )-> Optional[int]:
if tokenizer_name is not None and tokenizer_name not in TOKENIZER_CLASSES:
raise ValueError(F"""Unrecognized tokenizer name, should be one of {list(TOKENIZER_CLASSES.keys() )}.""" )
if tokenizer_name is None:
_snake_case : Tuple = TOKENIZER_CLASSES
else:
_snake_case : Union[str, Any] = {tokenizer_name: getattr(lowerCAmelCase , tokenizer_name + 'Fast' )}
logger.info(F"""Loading tokenizer classes: {tokenizer_names}""" )
for tokenizer_name in tokenizer_names:
_snake_case : Dict = TOKENIZER_CLASSES[tokenizer_name]
_snake_case : Optional[Any] = True
if checkpoint_name is None:
_snake_case : Union[str, Any] = list(tokenizer_class.max_model_input_sizes.keys() )
else:
_snake_case : Optional[int] = [checkpoint_name]
logger.info(F"""For tokenizer {tokenizer_class.__class__.__name__} loading checkpoints: {checkpoint_names}""" )
for checkpoint in checkpoint_names:
logger.info(F"""Loading {tokenizer_class.__class__.__name__} {checkpoint}""" )
# Load tokenizer
_snake_case : str = tokenizer_class.from_pretrained(lowerCAmelCase , force_download=lowerCAmelCase )
# Save fast tokenizer
logger.info(F"""Save fast tokenizer to {dump_path} with prefix {checkpoint} add_prefix {add_prefix}""" )
# For organization names we create sub-directories
if "/" in checkpoint:
_snake_case , _snake_case : Tuple = checkpoint.split('/' )
_snake_case : int = os.path.join(lowerCAmelCase , lowerCAmelCase )
elif add_prefix:
_snake_case : Dict = checkpoint
_snake_case : Optional[Any] = dump_path
else:
_snake_case : str = None
_snake_case : Union[str, Any] = dump_path
logger.info(F"""=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}""" )
if checkpoint in list(tokenizer.pretrained_vocab_files_map.values() )[0]:
_snake_case : Optional[Any] = list(tokenizer.pretrained_vocab_files_map.values() )[0][checkpoint]
_snake_case : Optional[int] = file_path.split(lowerCAmelCase )[-1][0]
if next_char == "/":
_snake_case : Union[str, Any] = os.path.join(lowerCAmelCase , lowerCAmelCase )
_snake_case : str = None
logger.info(F"""=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}""" )
_snake_case : Optional[int] = tokenizer.save_pretrained(
lowerCAmelCase , legacy_format=lowerCAmelCase , filename_prefix=lowerCAmelCase )
logger.info(F"""=> File names {file_names}""" )
for file_name in file_names:
if not file_name.endswith('tokenizer.json' ):
os.remove(lowerCAmelCase )
logger.info(F"""=> removing {file_name}""" )
if __name__ == "__main__":
lowerCAmelCase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--dump_path""", default=None, type=str, required=True, help="""Path to output generated fast tokenizer files."""
)
parser.add_argument(
"""--tokenizer_name""",
default=None,
type=str,
help=(
F"""Optional tokenizer type selected in the list of {list(TOKENIZER_CLASSES.keys())}. If not given, will """
"""download and convert all the checkpoints from AWS."""
),
)
parser.add_argument(
"""--checkpoint_name""",
default=None,
type=str,
help="""Optional checkpoint name. If not given, will download and convert the canonical checkpoints from AWS.""",
)
parser.add_argument(
"""--force_download""",
action="""store_true""",
help="""Re-download checkpoints.""",
)
lowerCAmelCase_ = parser.parse_args()
convert_slow_checkpoint_to_fast(args.tokenizer_name, args.checkpoint_name, args.dump_path, args.force_download)
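# Example invocation (paths and names are illustrative): convert one slow
# checkpoint and keep only the generated tokenizer.json:
# python convert_slow_tokenizers_checkpoints_to_fast.py --tokenizer_name BertTokenizer \
#     --checkpoint_name bert-base-uncased --dump_path ./fast-tokenizers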
| 669 | 1 |
from __future__ import annotations
from collections import namedtuple
from dataclasses import dataclass
@dataclass
class _lowerCAmelCase :
'''simple docstring'''
a_ : int
a_ : TreeNode | None =None
a_ : TreeNode | None =None
lowerCAmelCase_ = namedtuple("""CoinsDistribResult""", """moves excess""")
def lowerCamelCase_ ( lowerCAmelCase: TreeNode | None )-> int:
if root is None:
return 0
# Validation
def count_nodes(lowerCAmelCase: TreeNode | None ) -> int:
if node is None:
return 0
return count_nodes(node.left ) + count_nodes(node.right ) + 1
def count_coins(lowerCAmelCase: TreeNode | None ) -> int:
if node is None:
return 0
return count_coins(node.left ) + count_coins(node.right ) + node.data
if count_nodes(lowerCAmelCase ) != count_coins(lowerCAmelCase ):
raise ValueError('The number of nodes should be the same as the number of coins' )
# Main calculation
def get_distrib(lowerCAmelCase: TreeNode | None ) -> CoinsDistribResult:
if node is None:
return CoinsDistribResult(0 , 1 )
_snake_case , _snake_case : Optional[Any] = get_distrib(node.left )
_snake_case , _snake_case : int = get_distrib(node.right )
_snake_case : Optional[int] = 1 - left_distrib_excess
_snake_case : List[str] = 1 - right_distrib_excess
_snake_case : List[str] = (
left_distrib_moves
+ right_distrib_moves
+ abs(lowerCAmelCase )
+ abs(lowerCAmelCase )
)
_snake_case : str = node.data - coins_to_left - coins_to_right
return CoinsDistribResult(lowerCAmelCase , lowerCAmelCase )
return get_distrib(lowerCAmelCase )[0]
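# Worked example: a root holding 3 coins with two coinless leaf children needs
# 2 moves (one coin travels to each leaf). Each leaf reports excess 0, so
# coins_to_left = coins_to_right = 1 and the total is abs(1) + abs(1) = 2.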
if __name__ == "__main__":
import doctest
doctest.testmod()
| 669 |
def lowerCamelCase_ ( lowerCAmelCase: bytes )-> str:
return "".join([hex(lowerCAmelCase )[2:].zfill(2 ).upper() for byte in list(lowerCAmelCase )] )
def lowerCamelCase_ ( lowerCAmelCase: str )-> bytes:
# Check data validity, following RFC3548
# https://www.ietf.org/rfc/rfc3548.txt
if (len(lowerCAmelCase ) % 2) != 0:
raise ValueError(
'Base16 encoded data is invalid:\nData does not have an even number of hex digits.' )
# Check the character set - the standard base16 alphabet
# is uppercase according to RFC3548 section 6
if not set(lowerCAmelCase ) <= set('0123456789ABCDEF' ):
raise ValueError(
'Base16 encoded data is invalid:\nData is not uppercase hex or it contains invalid characters.' )
# For every two hexadecimal digits (= a byte), turn it into an integer.
# Then, string the result together into bytes, and return it.
return bytes(int(data[i] + data[i + 1] , 16 ) for i in range(0 , len(lowerCAmelCase ) , 2 ) )
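# Worked example: encoding b"HELLO" yields "48454C4C4F" (one uppercase hex
# pair per byte) and decoding "48454C4C4F" returns b"HELLO"; odd-length or
# lowercase input raises ValueError per the checks above.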
if __name__ == "__main__":
import doctest
doctest.testmod()
| 669 | 1 |
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
StableDiffusionAttendAndExcitePipeline,
UNetaDConditionModel,
)
from diffusers.utils import load_numpy, skip_mps, slow
from diffusers.utils.testing_utils import require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
lowerCAmelCase_ = False
@skip_mps
class _lowerCAmelCase ( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , unittest.TestCase ):
'''simple docstring'''
a_ : Optional[int] =StableDiffusionAttendAndExcitePipeline
a_ : Optional[Any] =False
a_ : Union[str, Any] =TEXT_TO_IMAGE_PARAMS
a_ : Dict =TEXT_TO_IMAGE_BATCH_PARAMS.union({"""token_indices"""} )
a_ : Tuple =TEXT_TO_IMAGE_IMAGE_PARAMS
a_ : List[Any] =TEXT_TO_IMAGE_IMAGE_PARAMS
@classmethod
def UpperCamelCase_ ( cls : List[str] ):
'''simple docstring'''
super().setUpClass()
torch.use_deterministic_algorithms(UpperCamelCase )
@classmethod
def UpperCamelCase_ ( cls : List[str] ):
'''simple docstring'''
super().tearDownClass()
torch.use_deterministic_algorithms(UpperCamelCase )
def UpperCamelCase_ ( self : List[Any] ):
'''simple docstring'''
torch.manual_seed(0 )
_snake_case : List[str] = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=1 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=32 , attention_head_dim=(2, 4) , use_linear_projection=UpperCamelCase , )
_snake_case : str = DDIMScheduler(
beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule='scaled_linear' , clip_sample=UpperCamelCase , set_alpha_to_one=UpperCamelCase , )
torch.manual_seed(0 )
_snake_case : Any = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , sample_size=1_28 , )
torch.manual_seed(0 )
_snake_case : Any = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , hidden_act='gelu' , projection_dim=5_12 , )
_snake_case : Any = CLIPTextModel(UpperCamelCase )
_snake_case : List[str] = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
_snake_case : int = {
'unet': unet,
'scheduler': scheduler,
'vae': vae,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'safety_checker': None,
'feature_extractor': None,
}
return components
def UpperCamelCase_ ( self : List[str] , UpperCamelCase : Dict , UpperCamelCase : List[Any]=0 ):
'''simple docstring'''
if str(UpperCamelCase ).startswith('mps' ):
_snake_case : Any = torch.manual_seed(UpperCamelCase )
else:
_snake_case : List[str] = torch.Generator(device=UpperCamelCase ).manual_seed(UpperCamelCase )
_snake_case : Union[str, Any] = {
'prompt': 'a cat and a frog',
'token_indices': [2, 5],
'generator': generator,
'num_inference_steps': 1,
'guidance_scale': 6.0,
'output_type': 'numpy',
'max_iter_to_alter': 2,
'thresholds': {0: 0.7},
}
return inputs
def UpperCamelCase_ ( self : Dict ):
'''simple docstring'''
_snake_case : List[Any] = 'cpu'
_snake_case : List[Any] = self.get_dummy_components()
_snake_case : Any = self.pipeline_class(**UpperCamelCase )
pipe.to(UpperCamelCase )
pipe.set_progress_bar_config(disable=UpperCamelCase )
_snake_case : Tuple = self.get_dummy_inputs(UpperCamelCase )
_snake_case : Optional[Any] = pipe(**UpperCamelCase ).images
_snake_case : Optional[Any] = image[0, -3:, -3:, -1]
self.assertEqual(image.shape , (1, 64, 64, 3) )
_snake_case : List[str] = np.array(
[0.63_90_53_64, 0.62_89_73_07, 0.48_59_90_17, 0.5_13_36_24, 0.5_55_00_48, 0.45_76_95_16, 0.50_32_69_73, 0.5_02_31_39, 0.45_38_44_96] )
_snake_case : List[Any] = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(UpperCamelCase , 1e-3 )
def UpperCamelCase_ ( self : int ):
'''simple docstring'''
super().test_cpu_offload_forward_pass(expected_max_diff=5e-4 )
def UpperCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
self._test_inference_batch_consistent(batch_sizes=[1, 2] )
def UpperCamelCase_ ( self : Dict ):
'''simple docstring'''
self._test_inference_batch_single_identical(batch_size=2 , expected_max_diff=7e-4 )
def UpperCamelCase_ ( self : List[str] ):
'''simple docstring'''
super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3 )
def UpperCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
super().test_pt_np_pil_outputs_equivalent(expected_max_diff=5e-4 )
def UpperCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
super().test_save_load_local(expected_max_difference=5e-4 )
def UpperCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
super().test_save_load_optional_components(expected_max_difference=4e-4 )
@require_torch_gpu
@slow
class _lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@classmethod
def UpperCamelCase_ ( cls : Optional[Any] ):
'''simple docstring'''
super().setUpClass()
torch.use_deterministic_algorithms(UpperCamelCase )
@classmethod
def UpperCamelCase_ ( cls : List[str] ):
'''simple docstring'''
super().tearDownClass()
torch.use_deterministic_algorithms(UpperCamelCase )
def UpperCamelCase_ ( self : Any ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCamelCase_ ( self : int ):
'''simple docstring'''
_snake_case : List[str] = torch.manual_seed(51 )
_snake_case : Optional[Any] = StableDiffusionAttendAndExcitePipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4' , safety_checker=UpperCamelCase , torch_dtype=torch.floataa )
pipe.to('cuda' )
_snake_case : List[Any] = 'a painting of an elephant with glasses'
_snake_case : Tuple = [5, 7]
_snake_case : int = pipe(
prompt=UpperCamelCase , token_indices=UpperCamelCase , guidance_scale=7.5 , generator=UpperCamelCase , num_inference_steps=5 , max_iter_to_alter=5 , output_type='numpy' , ).images[0]
_snake_case : Union[str, Any] = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/attend-and-excite/elephant_glasses.npy' )
assert np.abs((expected_image - image).max() ) < 5e-1
| 669 |
import csv
import tweepy
# Twitter API credentials
lowerCAmelCase_ = """"""
lowerCAmelCase_ = """"""
lowerCAmelCase_ = """"""
lowerCAmelCase_ = """"""
def lowerCamelCase_ ( lowerCAmelCase: str )-> None:
# authorize twitter, initialize tweepy
_snake_case : Optional[Any] = tweepy.OAuthHandler(lowerCAmelCase , lowerCAmelCase )
auth.set_access_token(lowerCAmelCase , lowerCAmelCase )
_snake_case : List[Any] = tweepy.API(lowerCAmelCase )
# initialize a list to hold all the tweepy Tweets
_snake_case : Any = []
# make initial request for most recent tweets (200 is the maximum allowed count)
_snake_case : List[str] = api.user_timeline(screen_name=lowerCAmelCase , count=2_00 )
# save most recent tweets
alltweets.extend(lowerCAmelCase )
# save the id of the oldest tweet less one
_snake_case : List[Any] = alltweets[-1].id - 1
# keep grabbing tweets until there are no tweets left to grab
while len(lowerCAmelCase ) > 0:
print(F"""getting tweets before {oldest}""" )
# all subsequent requests use the max_id param to prevent duplicates
_snake_case : Tuple = api.user_timeline(
screen_name=lowerCAmelCase , count=2_00 , max_id=lowerCAmelCase )
# save most recent tweets
alltweets.extend(lowerCAmelCase )
# update the id of the oldest tweet less one
_snake_case : List[str] = alltweets[-1].id - 1
print(F"""...{len(lowerCAmelCase )} tweets downloaded so far""" )
# transform the tweepy tweets into a 2D array that will populate the csv
_snake_case : int = [[tweet.id_str, tweet.created_at, tweet.text] for tweet in alltweets]
# write the csv
with open(F"""new_{screen_name}_tweets.csv""" , 'w' ) as f:
_snake_case : Any = csv.writer(lowerCAmelCase )
writer.writerow(['id', 'created_at', 'text'] )
writer.writerows(lowerCAmelCase )
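# Pagination note: if the oldest tweet fetched so far has id 1000, the next
# request passes max_id=999, so pages never overlap; the loop ends once a
# request returns no tweets.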
if __name__ == "__main__":
# pass in the username of the account you want to download
get_all_tweets("""FirePing32""")
| 669 | 1 |