from collections import defaultdict
from typing import Optional

from ..image_utils import load_image
from ..utils import (
    add_end_docstrings,
    is_torch_available,
    logging,
    requires_backends,
)
from .base import PIPELINE_INIT_ARGS, ChunkPipeline


if is_torch_available():
    import torch

    from ..models.auto.modeling_auto import MODEL_FOR_MASK_GENERATION_MAPPING

logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class MaskGenerationPipeline(ChunkPipeline):
    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        requires_backends(self, "vision")
        requires_backends(self, "torch")

        if self.framework != "pt":
            raise ValueError(f"The {self.__class__} is only available in PyTorch.")

        self.check_model_type(MODEL_FOR_MASK_GENERATION_MAPPING)

    def _sanitize_parameters(self, **kwargs):
        preprocess_kwargs = {}
        forward_params = {}
        postprocess_kwargs = {}
        # preprocess args
        if "points_per_batch" in kwargs:
            preprocess_kwargs["points_per_batch"] = kwargs["points_per_batch"]
        if "points_per_crop" in kwargs:
            preprocess_kwargs["points_per_crop"] = kwargs["points_per_crop"]
        if "crops_n_layers" in kwargs:
            preprocess_kwargs["crops_n_layers"] = kwargs["crops_n_layers"]
        if "crop_overlap_ratio" in kwargs:
            preprocess_kwargs["crop_overlap_ratio"] = kwargs["crop_overlap_ratio"]
        if "crop_n_points_downscale_factor" in kwargs:
            preprocess_kwargs["crop_n_points_downscale_factor"] = kwargs["crop_n_points_downscale_factor"]
        # postprocess args
        if "pred_iou_thresh" in kwargs:
            forward_params["pred_iou_thresh"] = kwargs["pred_iou_thresh"]
        if "stability_score_offset" in kwargs:
            forward_params["stability_score_offset"] = kwargs["stability_score_offset"]
        if "mask_threshold" in kwargs:
            forward_params["mask_threshold"] = kwargs["mask_threshold"]
        if "stability_score_thresh" in kwargs:
            forward_params["stability_score_thresh"] = kwargs["stability_score_thresh"]
        if "crops_nms_thresh" in kwargs:
            postprocess_kwargs["crops_nms_thresh"] = kwargs["crops_nms_thresh"]
        if "output_rle_mask" in kwargs:
            postprocess_kwargs["output_rle_mask"] = kwargs["output_rle_mask"]
        if "output_bboxes_mask" in kwargs:
            postprocess_kwargs["output_bboxes_mask"] = kwargs["output_bboxes_mask"]
        return preprocess_kwargs, forward_params, postprocess_kwargs

    def __call__(self, image, *args, num_workers=None, batch_size=None, **kwargs):
        return super().__call__(image, *args, num_workers=num_workers, batch_size=batch_size, **kwargs)

    def preprocess(
        self,
        image,
        points_per_batch=64,
        crops_n_layers: int = 0,
        crop_overlap_ratio: float = 512 / 1500,
        points_per_crop: Optional[int] = 32,
        crop_n_points_downscale_factor: Optional[int] = 1,
    ):
        image = load_image(image)
        target_size = self.image_processor.size["longest_edge"]
        crop_boxes, grid_points, cropped_images, input_labels = self.image_processor.generate_crop_boxes(
            image, target_size, crops_n_layers, crop_overlap_ratio, points_per_crop, crop_n_points_downscale_factor
        )
        model_inputs = self.image_processor(images=cropped_images, return_tensors="pt")

        with self.device_placement():
            if self.framework == "pt":
                inference_context = self.get_inference_context()
                with inference_context():
                    model_inputs = self._ensure_tensor_on_device(model_inputs, device=self.device)
                    image_embeddings = self.model.get_image_embeddings(model_inputs.pop("pixel_values"))
                    model_inputs["image_embeddings"] = image_embeddings

        n_points = grid_points.shape[1]
        points_per_batch = points_per_batch if points_per_batch is not None else n_points

        if points_per_batch <= 0:
            raise ValueError(
                "Cannot have points_per_batch<=0. Must be >=1 to return batched outputs. "
                "To return all points at once, set points_per_batch to None"
            )

        for i in range(0, n_points, points_per_batch):
            batched_points = grid_points[:, i : i + points_per_batch, :, :]
            labels = input_labels[:, i : i + points_per_batch]
            is_last = i == n_points - points_per_batch
            yield {
                "input_points": batched_points,
                "input_labels": labels,
                "input_boxes": crop_boxes,
                "is_last": is_last,
                **model_inputs,
            }

    def _forward(
        self,
        model_inputs,
        pred_iou_thresh=0.88,
        stability_score_thresh=0.95,
        mask_threshold=0,
        stability_score_offset=1,
    ):
        input_boxes = model_inputs.pop("input_boxes")
        is_last = model_inputs.pop("is_last")
        original_sizes = model_inputs.pop("original_sizes").tolist()
        reshaped_input_sizes = model_inputs.pop("reshaped_input_sizes").tolist()

        model_outputs = self.model(**model_inputs)

        # post processing happens here in order to avoid CPU GPU copies of ALL the masks
        low_resolution_masks = model_outputs["pred_masks"]
        masks = self.image_processor.post_process_masks(
            low_resolution_masks, original_sizes, reshaped_input_sizes, mask_threshold, binarize=False
        )
        iou_scores = model_outputs["iou_scores"]
        masks, iou_scores, boxes = self.image_processor.filter_masks(
            masks[0],
            iou_scores[0],
            original_sizes[0],
            input_boxes[0],
            pred_iou_thresh,
            stability_score_thresh,
            mask_threshold,
            stability_score_offset,
        )
        return {
            "masks": masks,
            "is_last": is_last,
            "boxes": boxes,
            "iou_scores": iou_scores,
        }

    def postprocess(
        self,
        model_outputs,
        output_rle_mask=False,
        output_bboxes_mask=False,
        crops_nms_thresh=0.7,
    ):
        all_scores = []
        all_masks = []
        all_boxes = []
        for model_output in model_outputs:
            all_scores.append(model_output.pop("iou_scores"))
            all_masks.extend(model_output.pop("masks"))
            all_boxes.append(model_output.pop("boxes"))
        all_scores = torch.cat(all_scores)
        all_boxes = torch.cat(all_boxes)
        output_masks, iou_scores, rle_mask, bounding_boxes = self.image_processor.post_process_for_mask_generation(
            all_masks, all_scores, all_boxes, crops_nms_thresh
        )

        extra = defaultdict(list)
        for output in model_outputs:
            for k, v in output.items():
                extra[k].append(v)

        optional = {}
        if output_rle_mask:
            optional["rle_mask"] = rle_mask
        if output_bboxes_mask:
            optional["bounding_boxes"] = bounding_boxes
        return {"masks": output_masks, "scores": iou_scores, **optional, **extra}

from math import ceil


def solution(n: int = 1001) -> int:
    """Return the sum of the numbers on the diagonals of an n by n number spiral."""
    total = 1
    for i in range(1, int(ceil(n / 2.0))):
        odd = 2 * i + 1
        even = 2 * i
        total = total + 4 * odd**2 - 6 * even
    return total


if __name__ == "__main__":
    import sys

    if len(sys.argv) == 1:
        print(solution())
    else:
        try:
            n = int(sys.argv[1])
            print(solution(n))
        except ValueError:
            print("Invalid entry - please enter a number")
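
# Quick sanity check (added for illustration): ring i of the spiral has corners
# (2i+1)**2, (2i+1)**2 - 2i, (2i+1)**2 - 4i and (2i+1)**2 - 6i, which sum to
# 4*(2i+1)**2 - 12i = 4*odd**2 - 6*even, the update used in the loop above.
# For a 5x5 spiral: 1 + (3 + 5 + 7 + 9) + (13 + 17 + 21 + 25) = 101.
assert solution(5) == 101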

def twos_complement(number: int) -> str:
    """Take a negative integer and return its two's complement representation as a string."""
    if number > 0:
        raise ValueError("input must be a negative integer")
    binary_number_length = len(bin(number)[3:])
    twos_complement_number = bin(abs(number) - (1 << binary_number_length))[3:]
    twos_complement_number = (
        (
            "1"
            + "0" * (binary_number_length - len(twos_complement_number))
            + twos_complement_number
        )
        if number < 0
        else "0"
    )
    return "0b" + twos_complement_number


if __name__ == "__main__":
    import doctest

    doctest.testmod()
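
# Worked example (added for illustration): for -5, bin(-5)[3:] == "101" gives a
# 3-bit magnitude; abs(-5) - (1 << 3) == -3 and bin(-3)[3:] == "11", which is
# left-padded with "1" and zeros to "1011".
assert twos_complement(-5) == "0b1011"
assert twos_complement(-17) == "0b101111"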

def naive_cut_rod_recursive(n: int, prices: list):
    """Exhaustive recursive solution to the rod-cutting problem."""
    _enforce_args(n, prices)
    if n == 0:
        return 0
    max_revenue = float("-inf")
    for i in range(1, n + 1):
        max_revenue = max(
            max_revenue, prices[i - 1] + naive_cut_rod_recursive(n - i, prices)
        )
    return max_revenue


def top_down_cut_rod(n: int, prices: list):
    """Memoized (top-down dynamic programming) solution."""
    _enforce_args(n, prices)
    max_rev = [float("-inf") for _ in range(n + 1)]
    return _top_down_cut_rod_recursive(n, prices, max_rev)


def _top_down_cut_rod_recursive(n: int, prices: list, max_rev: list):
    if max_rev[n] >= 0:
        return max_rev[n]
    elif n == 0:
        return 0
    else:
        max_revenue = float("-inf")
        for i in range(1, n + 1):
            max_revenue = max(
                max_revenue,
                prices[i - 1] + _top_down_cut_rod_recursive(n - i, prices, max_rev),
            )
        max_rev[n] = max_revenue
        return max_rev[n]


def bottom_up_cut_rod(n: int, prices: list):
    """Iterative (bottom-up dynamic programming) solution."""
    _enforce_args(n, prices)
    # length(max_rev) = n + 1, to accommodate for the revenue obtainable from a rod of
    # length 0.
    max_rev = [float("-inf") for _ in range(n + 1)]
    max_rev[0] = 0

    for i in range(1, n + 1):
        max_revenue_i = max_rev[i]
        for j in range(1, i + 1):
            max_revenue_i = max(max_revenue_i, prices[j - 1] + max_rev[i - j])
        max_rev[i] = max_revenue_i

    return max_rev[n]


def _enforce_args(n: int, prices: list):
    if n < 0:
        raise ValueError(f"n must be greater than or equal to 0. Got n = {n}")
    if n > len(prices):
        raise ValueError(
            "Each integral piece of rod must have a corresponding price. "
            f"Got n = {n} but length of prices = {len(prices)}"
        )


def main():
    prices = [6, 10, 12, 15, 20, 23]
    n = len(prices)
    # the best revenue comes from cutting the rod into 6 pieces, each
    # of length 1 resulting in a revenue of 6 * 6 = 36.
    expected_max_revenue = 36
    max_rev_top_down = top_down_cut_rod(n, prices)
    max_rev_bottom_up = bottom_up_cut_rod(n, prices)
    max_rev_naive = naive_cut_rod_recursive(n, prices)

    assert expected_max_revenue == max_rev_top_down
    assert max_rev_top_down == max_rev_bottom_up
    assert max_rev_bottom_up == max_rev_naive


if __name__ == "__main__":
    main()
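
# A slightly larger instance (added for illustration; the prices follow the
# classic CLRS example). The naive recursion is exponential in n, while both
# dynamic programming variants run in O(n**2).
clrs_prices = [1, 5, 8, 9, 10, 17, 17, 20]
print(bottom_up_cut_rod(8, clrs_prices))  # 22: one piece of length 2, one of length 6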
from typing import List, Optional

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "huggingface/autoformer-tourism-monthly": "https://huggingface.co/huggingface/autoformer-tourism-monthly/resolve/main/config.json",
}


class AutoformerConfig(PretrainedConfig):
    model_type = "autoformer"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
        "num_hidden_layers": "encoder_layers",
    }

    def __init__(
        self,
        prediction_length: Optional[int] = None,
        context_length: Optional[int] = None,
        distribution_output: str = "student_t",
        loss: str = "nll",
        input_size: int = 1,
        lags_sequence: List[int] = [1, 2, 3, 4, 5, 6, 7],
        scaling: bool = True,
        num_time_features: int = 0,
        num_dynamic_real_features: int = 0,
        num_static_categorical_features: int = 0,
        num_static_real_features: int = 0,
        cardinality: Optional[List[int]] = None,
        embedding_dimension: Optional[List[int]] = None,
        d_model: int = 64,
        encoder_attention_heads: int = 2,
        decoder_attention_heads: int = 2,
        encoder_layers: int = 2,
        decoder_layers: int = 2,
        encoder_ffn_dim: int = 32,
        decoder_ffn_dim: int = 32,
        activation_function: str = "gelu",
        dropout: float = 0.1,
        encoder_layerdrop: float = 0.1,
        decoder_layerdrop: float = 0.1,
        attention_dropout: float = 0.1,
        activation_dropout: float = 0.1,
        num_parallel_samples: int = 100,
        init_std: float = 0.02,
        use_cache: bool = True,
        is_encoder_decoder=True,
        label_length: int = 10,
        moving_average: int = 25,
        autocorrelation_factor: int = 3,
        **kwargs,
    ):
        # time series specific configuration
        self.prediction_length = prediction_length
        self.context_length = context_length if context_length is not None else prediction_length
        self.distribution_output = distribution_output
        self.loss = loss
        self.input_size = input_size
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence
        self.scaling = scaling
        self.num_dynamic_real_features = num_dynamic_real_features
        self.num_static_real_features = num_static_real_features
        self.num_static_categorical_features = num_static_categorical_features
        if cardinality is not None and num_static_categorical_features > 0:
            if len(cardinality) != num_static_categorical_features:
                raise ValueError(
                    "The cardinality should be a list of the same length as `num_static_categorical_features`"
                )
            self.cardinality = cardinality
        else:
            self.cardinality = [0]
        if embedding_dimension is not None and num_static_categorical_features > 0:
            if len(embedding_dimension) != num_static_categorical_features:
                raise ValueError(
                    "The embedding dimension should be a list of the same length as `num_static_categorical_features`"
                )
            self.embedding_dimension = embedding_dimension
        else:
            self.embedding_dimension = [min(50, (cat + 1) // 2) for cat in self.cardinality]
        self.num_parallel_samples = num_parallel_samples

        # Transformer architecture configuration
        self.feature_size = input_size * len(self.lags_sequence) + self._number_of_features
        self.d_model = d_model
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_attention_heads = decoder_attention_heads
        self.encoder_ffn_dim = encoder_ffn_dim
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.activation_function = activation_function
        self.init_std = init_std
        self.use_cache = use_cache

        # Autoformer
        self.label_length = label_length
        self.moving_average = moving_average
        self.autocorrelation_factor = autocorrelation_factor

        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def _number_of_features(self) -> int:
        return (
            sum(self.embedding_dimension)
            + self.num_dynamic_real_features
            + self.num_time_features
            + self.num_static_real_features
            + self.input_size * 2  # the log1p(abs(loc)) and log(scale) features
        )
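
# A minimal usage sketch (added for illustration; it assumes the class above
# is exported as `transformers.AutoformerConfig`, and the lengths are made up):
from transformers import AutoformerConfig

config = AutoformerConfig(prediction_length=24, context_length=48)
print(config.prediction_length)  # 24
print(config.hidden_size)  # 64, resolved to d_model through attribute_map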

import argparse

import torch
from torch import nn

from transformers import MBartConfig, MBartForConditionalGeneration


def remove_ignore_keys_(state_dict):
    ignore_keys = [
        "encoder.version",
        "decoder.version",
        "model.encoder.version",
        "model.decoder.version",
        "_float_tensor",
        "decoder.output_projection.weight",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)


def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer


def convert_fairseq_mbart_checkpoint_from_disk(
    checkpoint_path, hf_config_path="facebook/mbart-large-en-ro", finetuned=False, mbart_50=False
):
    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]
    remove_ignore_keys_(state_dict)
    vocab_size = state_dict["encoder.embed_tokens.weight"].shape[0]

    mbart_config = MBartConfig.from_pretrained(hf_config_path, vocab_size=vocab_size)
    if mbart_50 and finetuned:
        mbart_config.activation_function = "relu"

    state_dict["shared.weight"] = state_dict["decoder.embed_tokens.weight"]
    model = MBartForConditionalGeneration(mbart_config)
    model.model.load_state_dict(state_dict)

    if finetuned:
        model.lm_head = make_linear_from_emb(model.model.shared)

    return model


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "fairseq_path", type=str, help="bart.large, bart.large.cnn or a path to a model.pt on local filesystem."
    )
    parser.add_argument("pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument(
        "--hf_config",
        default="facebook/mbart-large-cc25",
        type=str,
        help="Which huggingface architecture to use: mbart-large",
    )
    parser.add_argument("--mbart_50", action="store_true", help="whether the model is an mBART-50 checkpoint")
    parser.add_argument("--finetuned", action="store_true", help="whether the model is a fine-tuned checkpoint")
    args = parser.parse_args()
    model = convert_fairseq_mbart_checkpoint_from_disk(
        args.fairseq_path, hf_config_path=args.hf_config, finetuned=args.finetuned, mbart_50=args.mbart_50
    )
    model.save_pretrained(args.pytorch_dump_folder_path)
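
# Example invocation (added for illustration; the script name and paths are
# placeholders for your local setup):
#
#   python convert_mbart_original_checkpoint_to_pytorch.py \
#       /path/to/fairseq/model.pt ./mbart-converted \
#       --hf_config facebook/mbart-large-cc25 --finetuned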

import unittest

from transformers import AutoTokenizer, FalconConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device

from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import (
        FalconForCausalLM,
        FalconForQuestionAnswering,
        FalconForSequenceClassification,
        FalconForTokenClassification,
        FalconModel,
    )


class FalconModelTester:
    def __init__(
        self,
        parent,
        batch_size=3,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return FalconConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
            pad_token_id=1,
            new_decoder_architecture=True,
        )

    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = FalconModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_model_as_decoder(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        config.add_cross_attention = True
        model = FalconModel(config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
        )
        result = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
        )
        result = model(input_ids, attention_mask=input_mask)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_causal_lm(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        model = FalconForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_decoder_model_past_large_inputs(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        config.is_decoder = True
        config.add_cross_attention = True
        model = FalconForCausalLM(config=config)
        model.to(torch_device)
        model.eval()

        # first forward pass
        outputs = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            use_cache=True,
        )
        past_key_values = outputs.past_key_values

        # create hypothetical multiple next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_mask = ids_tensor((self.batch_size, 3), vocab_size=2)

        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([input_mask, next_mask], dim=-1)

        output_from_no_past = model(
            next_input_ids,
            attention_mask=next_attention_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            output_hidden_states=True,
        )["hidden_states"][0]
        output_from_past = model(
            next_tokens,
            attention_mask=next_attention_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            past_key_values=past_key_values,
            output_hidden_states=True,
        )["hidden_states"][0]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()

        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict


@require_torch
class FalconModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            FalconModel,
            FalconForCausalLM,
            FalconForSequenceClassification,
            FalconForTokenClassification,
            FalconForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = (FalconForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": FalconModel,
            "text-classification": FalconForSequenceClassification,
            "text-generation": FalconForCausalLM,
            "question-answering": FalconForQuestionAnswering,
            "token-classification": FalconForTokenClassification,
            "zero-shot": FalconForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_headmasking = False
    test_pruning = False

    def setUp(self):
        self.model_tester = FalconModelTester(self)
        self.config_tester = ConfigTester(self, config_class=FalconConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_position_embedding_types(self):
        config, *inputs = self.model_tester.prepare_config_and_inputs()
        for alibi in [True, False]:
            config.alibi = alibi
            self.model_tester.create_and_check_model(config, *inputs)

    def test_falcon_sequence_classification_model(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = FalconForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))

    def test_falcon_sequence_classification_model_for_single_label(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = "single_label_classification"
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = FalconForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))

    def test_rw_cache_conversion(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        input_ids = input_dict["input_ids"]
        model = FalconForCausalLM(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, use_cache=True)

        batch_size = input_ids.shape[0]
        rw_cache = model._convert_to_rw_cache(result.past_key_values)
        standard_cache = model._convert_cache_to_standard_format(rw_cache, batch_size)
        for layer in range(len(rw_cache)):
            for tensor_idx in range(2):
                self.assertTrue(rw_cache[layer][tensor_idx].ndim == 3)
                self.assertTrue(result.past_key_values[layer][tensor_idx].ndim == 4)
                self.assertTrue(
                    torch.all(result.past_key_values[layer][tensor_idx] == standard_cache[layer][tensor_idx])
                )

    def test_falcon_sequence_classification_model_for_multi_label(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = "multi_label_classification"
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor(
            [self.model_tester.batch_size, config.num_labels], self.model_tester.type_sequence_label_size
        ).to(torch.float)
        model = FalconForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))

    def test_past_key_values_format(self):
        # Falcon can have a different number of KV heads than attention heads, so this
        # overrides the common test to compute the expected shapes accordingly.
        for model_class in self.all_generative_model_classes:
            config, inputs = self.model_tester.prepare_config_and_inputs_for_common()

            # If it doesn't support cache, pass the test
            if not hasattr(config, "use_cache"):
                return

            model = model_class(config).to(torch_device)
            if "use_cache" not in inputs:
                inputs["use_cache"] = True
            outputs = model(**inputs)

            # If "past_key_values" is not returned, pass the test (e.g. RWKV uses a different cache name and format)
            if "past_key_values" not in outputs:
                return

            num_hidden_layers = (
                getattr(config, "decoder_layers", None)
                or getattr(config, "num_decoder_layers", None)
                or config.num_hidden_layers
            )
            num_attention_heads = getattr(config, "num_kv_heads", config.num_attention_heads)
            embed_dim = getattr(config, "d_model", config.hidden_size)
            per_head_embed_dim = embed_dim // num_attention_heads

            past_kv = outputs["past_key_values"]
            self.assertEqual(len(past_kv), num_hidden_layers)

            batch_size, seq_length = inputs["input_ids"].shape
            for i in range(num_hidden_layers):
                if config.new_decoder_architecture:
                    num_attention_heads = config.num_attention_heads
                elif config.multi_query:
                    num_attention_heads = 1
                self.assertEqual(len(past_kv[0]), 2)  # K V for the decoder = 2
                self.assertEqual(
                    past_kv[i][0].shape, (batch_size, num_attention_heads, seq_length, per_head_embed_dim)
                )
                self.assertEqual(
                    past_kv[i][1].shape, (batch_size, num_attention_heads, seq_length, per_head_embed_dim)
                )


@require_torch
class FalconLanguageGenerationTest(unittest.TestCase):
    @slow
    def test_lm_generate_falcon(self):
        tokenizer = AutoTokenizer.from_pretrained("Rocketknight1/falcon-rw-1b")
        model = FalconForCausalLM.from_pretrained("Rocketknight1/falcon-rw-1b")
        model.eval()
        model.to(torch_device)
        inputs = tokenizer("My favorite food is", return_tensors="pt").to(torch_device)

        EXPECTED_OUTPUT = (
            "My favorite food is pizza. I love it so much that I have a pizza party every year for my birthday."
        )

        output_ids = model.generate(**inputs, do_sample=False, max_new_tokens=19)
        output_str = tokenizer.batch_decode(output_ids)[0]

        self.assertEqual(output_str, EXPECTED_OUTPUT)

    @slow
    def test_lm_generation_big_models(self):
        for repo in ["Rocketknight1/tiny-random-falcon-7b", "Rocketknight1/tiny-random-falcon-40b"]:
            tokenizer = AutoTokenizer.from_pretrained(repo)
            model = FalconForCausalLM.from_pretrained(repo)
            model.eval()
            model.to(torch_device)
            inputs = tokenizer("My favorite food is", return_tensors="pt").to(torch_device)

            # We just test that these run without errors - the models are randomly initialized
            # and so the actual text outputs will be garbage
            model.generate(**inputs, do_sample=False, max_new_tokens=4)
            model.generate(**inputs, do_sample=True, max_new_tokens=4)
            model.generate(**inputs, num_beams=2, max_new_tokens=4)

    @slow
    def test_lm_generation_use_cache(self):
        with torch.no_grad():
            for repo in [
                "Rocketknight1/falcon-rw-1b",
                "Rocketknight1/tiny-random-falcon-7b",
                "Rocketknight1/tiny-random-falcon-40b",
            ]:
                tokenizer = AutoTokenizer.from_pretrained(repo)
                model = FalconForCausalLM.from_pretrained(repo)
                model.eval()
                model.to(device=torch_device)
                inputs = tokenizer("My favorite food is", return_tensors="pt").to(torch_device)

                # Test results are the same with and without cache
                outputs_no_cache = model.generate(**inputs, do_sample=False, max_new_tokens=20, use_cache=False)
                outputs_cache = model.generate(**inputs, do_sample=False, max_new_tokens=20, use_cache=True)
                self.assertTrue((outputs_cache - outputs_no_cache).sum().item() == 0)
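
# To run this suite (added note; the path assumes a `transformers` source
# checkout, matching the relative imports above):
#
#   python -m pytest tests/models/falcon/test_modeling_falcon.py -v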

import torch
import torch.nn as nn
from transformers.modeling_utils import ModuleUtilsMixin
from transformers.models.t5.modeling_t5 import T5Block, T5Config, T5LayerNorm

from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin


class SpectrogramNotesEncoder(ModelMixin, ConfigMixin, ModuleUtilsMixin):
    @register_to_config
    def __init__(
        self,
        max_length,
        vocab_size,
        d_model,
        dropout_rate,
        num_layers,
        num_heads,
        d_kv,
        d_ff,
        feed_forward_proj,
        is_decoder=False,
    ):
        super().__init__()

        self.token_embedder = nn.Embedding(vocab_size, d_model)

        self.position_encoding = nn.Embedding(max_length, d_model)
        self.position_encoding.weight.requires_grad = False

        self.dropout_pre = nn.Dropout(p=dropout_rate)

        t5config = T5Config(
            vocab_size=vocab_size,
            d_model=d_model,
            num_heads=num_heads,
            d_kv=d_kv,
            d_ff=d_ff,
            dropout_rate=dropout_rate,
            feed_forward_proj=feed_forward_proj,
            is_decoder=is_decoder,
            is_encoder_decoder=False,
        )

        self.encoders = nn.ModuleList()
        for lyr_num in range(num_layers):
            lyr = T5Block(t5config)
            self.encoders.append(lyr)

        self.layer_norm = T5LayerNorm(d_model)
        self.dropout_post = nn.Dropout(p=dropout_rate)

    def forward(self, encoder_input_tokens, encoder_inputs_mask):
        x = self.token_embedder(encoder_input_tokens)

        seq_length = encoder_input_tokens.shape[1]
        inputs_positions = torch.arange(seq_length, device=encoder_input_tokens.device)
        x += self.position_encoding(inputs_positions)

        x = self.dropout_pre(x)

        # inverted the attention mask
        input_shape = encoder_input_tokens.size()
        extended_attention_mask = self.get_extended_attention_mask(encoder_inputs_mask, input_shape)

        for lyr in self.encoders:
            x = lyr(x, extended_attention_mask)[0]
        x = self.layer_norm(x)

        return self.dropout_post(x), encoder_inputs_mask
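
# A shape-check sketch (added for illustration; every dimension below is made
# up, and "gated-gelu" is just one valid T5 feed-forward option):
encoder = SpectrogramNotesEncoder(
    max_length=32,
    vocab_size=128,
    d_model=64,
    dropout_rate=0.1,
    num_layers=2,
    num_heads=4,
    d_kv=16,
    d_ff=128,
    feed_forward_proj="gated-gelu",
)
encoder.eval()  # disable dropout for a deterministic pass
tokens = torch.randint(0, 128, (1, 32))
mask = torch.ones(1, 32, dtype=torch.long)
hidden_states, out_mask = encoder(tokens, mask)
print(hidden_states.shape)  # torch.Size([1, 32, 64])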

from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available


_import_structure = {"configuration_speech_encoder_decoder": ["SpeechEncoderDecoderConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_speech_encoder_decoder"] = ["SpeechEncoderDecoderModel"]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_speech_encoder_decoder"] = ["FlaxSpeechEncoderDecoderModel"]

if TYPE_CHECKING:
    from .configuration_speech_encoder_decoder import SpeechEncoderDecoderConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_speech_encoder_decoder import SpeechEncoderDecoderModel

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_speech_encoder_decoder import FlaxSpeechEncoderDecoderModel

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)


import sacrebleu as scb
from packaging import version
from sacrebleu import TER
import datasets
_CITATION = '''\
@inproceedings{snover-etal-2006-study,
title = "A Study of Translation Edit Rate with Targeted Human Annotation",
author = "Snover, Matthew and
Dorr, Bonnie and
Schwartz, Rich and
Micciulla, Linnea and
Makhoul, John",
booktitle = "Proceedings of the 7th Conference of the Association for Machine Translation in the Americas: Technical Papers",
month = aug # " 8-12",
year = "2006",
address = "Cambridge, Massachusetts, USA",
publisher = "Association for Machine Translation in the Americas",
url = "https://aclanthology.org/2006.amta-papers.25",
pages = "223--231",
}
@inproceedings{post-2018-call,
title = "A Call for Clarity in Reporting {BLEU} Scores",
author = "Post, Matt",
booktitle = "Proceedings of the Third Conference on Machine Translation: Research Papers",
month = oct,
year = "2018",
address = "Belgium, Brussels",
publisher = "Association for Computational Linguistics",
url = "https://www.aclweb.org/anthology/W18-6319",
pages = "186--191",
}
'''
_DESCRIPTION = '''\
TER (Translation Edit Rate, also called Translation Error Rate) is a metric to quantify the edit operations that a
hypothesis requires to match a reference translation. We use the implementation that is already present in sacrebleu
(https://github.com/mjpost/sacreBLEU#ter), which in turn is inspired by the TERCOM implementation, which can be found
here: https://github.com/jhclark/tercom.
The implementation here is slightly different from sacrebleu in terms of the required input format. The length of
the references and hypotheses lists need to be the same, so you may need to transpose your references compared to
sacrebleu\'s required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534
See the README.md file at https://github.com/mjpost/sacreBLEU#ter for more information.
'''
_KWARGS_DESCRIPTION = '''
Produces TER scores alongside the number of edits and reference length.
Args:
predictions (list of str): The system stream (a sequence of segments).
references (list of list of str): A list of one or more reference streams (each a sequence of segments).
normalized (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`.
ignore_punct (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`.
support_zh_ja_chars (boolean): If `True`, tokenization/normalization supports processing of Chinese characters,
as well as Japanese Kanji, Hiragana, Katakana, and Phonetic Extensions of Katakana.
Only applies if `normalized = True`. Defaults to `False`.
case_sensitive (boolean): If `False`, makes all predictions and references lowercase to ignore differences in case. Defaults to `False`.
Returns:
\'score\' (float): TER score (num_edits / sum_ref_lengths * 100)
\'num_edits\' (int): The cumulative number of edits
\'ref_length\' (float): The cumulative average reference length
Examples:
Example 1:
>>> predictions = ["does this sentence match??",
... "what about this sentence?",
... "What did the TER metric user say to the developer?"]
>>> references = [["does this sentence match", "does this sentence match!?!"],
... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"],
... ["Your jokes are...", "...TERrible"]]
>>> ter = datasets.load_metric("ter")
>>> results = ter.compute(predictions=predictions,
... references=references,
... case_sensitive=True)
>>> print(results)
{\'score\': 150.0, \'num_edits\': 15, \'ref_length\': 10.0}
Example 2:
>>> predictions = ["does this sentence match??",
... "what about this sentence?"]
>>> references = [["does this sentence match", "does this sentence match!?!"],
... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]]
>>> ter = datasets.load_metric("ter")
>>> results = ter.compute(predictions=predictions,
... references=references,
... case_sensitive=True)
>>> print(results)
{\'score\': 62.5, \'num_edits\': 5, \'ref_length\': 8.0}
Example 3:
>>> predictions = ["does this sentence match??",
... "what about this sentence?"]
>>> references = [["does this sentence match", "does this sentence match!?!"],
... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]]
>>> ter = datasets.load_metric("ter")
>>> results = ter.compute(predictions=predictions,
... references=references,
... normalized=True,
... case_sensitive=True)
>>> print(results)
{\'score\': 57.14285714285714, \'num_edits\': 6, \'ref_length\': 10.5}
Example 4:
>>> predictions = ["does this sentence match??",
... "what about this sentence?"]
>>> references = [["does this sentence match", "does this sentence match!?!"],
... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]]
>>> ter = datasets.load_metric("ter")
>>> results = ter.compute(predictions=predictions,
... references=references,
... ignore_punct=True,
... case_sensitive=False)
>>> print(results)
{\'score\': 0.0, \'num_edits\': 0, \'ref_length\': 8.0}
Example 5:
>>> predictions = ["does this sentence match??",
... "what about this sentence?",
... "What did the TER metric user say to the developer?"]
>>> references = [["does this sentence match", "does this sentence match!?!"],
... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"],
... ["Your jokes are...", "...TERrible"]]
>>> ter = datasets.load_metric("ter")
>>> results = ter.compute(predictions=predictions,
... references=references,
... ignore_punct=True,
... case_sensitive=False)
>>> print(results)
{\'score\': 100.0, \'num_edits\': 10, \'ref_length\': 10.0}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Ter(datasets.Metric):
    def _info(self):
        if version.parse(scb.__version__) < version.parse("1.4.12"):
            raise ImportWarning(
                "To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn't match this condition.\n"
                'You can install it with `pip install "sacrebleu>=1.4.12"`.'
            )
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            homepage="http://www.cs.umd.edu/~snover/tercom/",
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Sequence(datasets.Value("string", id="sequence"), id="references"),
                }
            ),
            codebase_urls=["https://github.com/mjpost/sacreBLEU#ter"],
            reference_urls=[
                "https://github.com/jhclark/tercom",
            ],
        )

    def _compute(
        self,
        predictions,
        references,
        normalized: bool = False,
        ignore_punct: bool = False,
        support_zh_ja_chars: bool = False,
        case_sensitive: bool = False,
    ):
        references_per_prediction = len(references[0])
        if any(len(refs) != references_per_prediction for refs in references):
            raise ValueError("Sacrebleu requires the same number of references for each prediction")
        transformed_references = [[refs[i] for refs in references] for i in range(references_per_prediction)]

        sb_ter = TER(
            normalized=normalized,
            no_punct=ignore_punct,
            asian_support=support_zh_ja_chars,
            case_sensitive=case_sensitive,
        )
        output = sb_ter.corpus_score(predictions, transformed_references)

        return {"score": output.score, "num_edits": output.num_edits, "ref_length": output.ref_length}

from typing import Dict, List, Optional, Tuple, Union

import torch

from ...models import AutoencoderKL, Transformer2DModel
from ...schedulers import KarrasDiffusionSchedulers
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput


class DiTPipeline(DiffusionPipeline):
    def __init__(
        self,
        transformer: Transformer2DModel,
        vae: AutoencoderKL,
        scheduler: KarrasDiffusionSchedulers,
        id2label: Optional[Dict[int, str]] = None,
    ):
        super().__init__()
        self.register_modules(transformer=transformer, vae=vae, scheduler=scheduler)

        # create an imagenet -> id dictionary for easier use
        self.labels = {}
        if id2label is not None:
            for key, value in id2label.items():
                for label in value.split(","):
                    self.labels[label.lstrip().rstrip()] = int(key)
            self.labels = dict(sorted(self.labels.items()))

    def get_label_ids(self, label: Union[str, List[str]]) -> List[int]:
        if not isinstance(label, list):
            label = list(label)

        for l in label:
            if l not in self.labels:
                raise ValueError(
                    f"{l} does not exist. Please make sure to select one of the following labels: \n {self.labels}."
                )

        return [self.labels[l] for l in label]

    @torch.no_grad()
    def __call__(
        self,
        class_labels: List[int],
        guidance_scale: float = 4.0,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        num_inference_steps: int = 50,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ) -> Union[ImagePipelineOutput, Tuple]:
        batch_size = len(class_labels)
        latent_size = self.transformer.config.sample_size
        latent_channels = self.transformer.config.in_channels

        latents = randn_tensor(
            shape=(batch_size, latent_channels, latent_size, latent_size),
            generator=generator,
            device=self.device,
            dtype=self.transformer.dtype,
        )
        latent_model_input = torch.cat([latents] * 2) if guidance_scale > 1 else latents

        class_labels = torch.tensor(class_labels, device=self.device).reshape(-1)
        class_null = torch.tensor([1000] * batch_size, device=self.device)
        class_labels_input = torch.cat([class_labels, class_null], 0) if guidance_scale > 1 else class_labels

        # set step values
        self.scheduler.set_timesteps(num_inference_steps)

        for t in self.progress_bar(self.scheduler.timesteps):
            if guidance_scale > 1:
                half = latent_model_input[: len(latent_model_input) // 2]
                latent_model_input = torch.cat([half, half], dim=0)
            latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)

            timesteps = t
            if not torch.is_tensor(timesteps):
                # TODO: this requires sync between CPU and GPU. So try to pass timesteps as tensors if you can
                # This would be a good case for the `match` statement (Python 3.10+)
                is_mps = latent_model_input.device.type == "mps"
                if isinstance(timesteps, float):
                    dtype = torch.float32 if is_mps else torch.float64
                else:
                    dtype = torch.int32 if is_mps else torch.int64
                timesteps = torch.tensor([timesteps], dtype=dtype, device=latent_model_input.device)
            elif len(timesteps.shape) == 0:
                timesteps = timesteps[None].to(latent_model_input.device)
            # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
            timesteps = timesteps.expand(latent_model_input.shape[0])
            # predict noise model_output
            noise_pred = self.transformer(
                latent_model_input, timestep=timesteps, class_labels=class_labels_input
            ).sample

            # perform guidance
            if guidance_scale > 1:
                eps, rest = noise_pred[:, :latent_channels], noise_pred[:, latent_channels:]
                cond_eps, uncond_eps = torch.split(eps, len(eps) // 2, dim=0)

                half_eps = uncond_eps + guidance_scale * (cond_eps - uncond_eps)
                eps = torch.cat([half_eps, half_eps], dim=0)

                noise_pred = torch.cat([eps, rest], dim=1)

            # learned sigma
            if self.transformer.config.out_channels // 2 == latent_channels:
                model_output, _ = torch.split(noise_pred, latent_channels, dim=1)
            else:
                model_output = noise_pred

            # compute previous image: x_t -> x_t-1
            latent_model_input = self.scheduler.step(model_output, t, latent_model_input).prev_sample

        if guidance_scale > 1:
            latents, _ = latent_model_input.chunk(2, dim=0)
        else:
            latents = latent_model_input

        latents = 1 / self.vae.config.scaling_factor * latents
        samples = self.vae.decode(latents).sample

        samples = (samples / 2 + 0.5).clamp(0, 1)

        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
        samples = samples.cpu().permute(0, 2, 3, 1).float().numpy()

        if output_type == "pil":
            samples = self.numpy_to_pil(samples)

        if not return_dict:
            return (samples,)

        return ImagePipelineOutput(images=samples)
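
# An end-to-end usage sketch (added for illustration; "facebook/DiT-XL-2-256"
# is the public DiT checkpoint, and the label names must exist in the
# pipeline's ImageNet id2label mapping):
import torch
from diffusers import DiTPipeline

pipe = DiTPipeline.from_pretrained("facebook/DiT-XL-2-256", torch_dtype=torch.float16)
pipe = pipe.to("cuda")
class_ids = pipe.get_label_ids(["white shark", "umbrella"])
images = pipe(class_labels=class_ids, guidance_scale=4.0, num_inference_steps=25).images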

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/vivit-b-16x2-kinetics400": (
        "https://huggingface.co/google/vivit-b-16x2-kinetics400/resolve/main/config.json"
    ),
    # See all Vivit models at https://huggingface.co/models?filter=vivit
}


class VivitConfig(PretrainedConfig):
    model_type = "vivit"

    def __init__(
        self,
        image_size=224,
        num_frames=32,
        tubelet_size=[2, 16, 16],
        num_channels=3,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu_fast",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-06,
        qkv_bias=True,
        **kwargs,
    ):
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps

        self.image_size = image_size
        self.num_frames = num_frames
        self.tubelet_size = tubelet_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias

        super().__init__(**kwargs)

from math import isclose, sqrt


def next_point(point_x: float, point_y: float, incoming_gradient: float) -> tuple[float, float, float]:
    # normal_gradient = gradient of the ellipse normal at (point_x, point_y)
    normal_gradient = point_y / 4 / point_x
    s2 = 2 * normal_gradient / (1 + normal_gradient * normal_gradient)
    c2 = (1 - normal_gradient * normal_gradient) / (
        1 + normal_gradient * normal_gradient
    )
    outgoing_gradient = (s2 - c2 * incoming_gradient) / (c2 + s2 * incoming_gradient)

    # to find the next point, solve the simultaneous equations:
    # y^2 + 4x^2 = 100
    # y - b = m * (x - a)
    # ==> A x^2 + B x + C = 0
    quadratic_term = outgoing_gradient**2 + 4
    linear_term = 2 * outgoing_gradient * (point_y - outgoing_gradient * point_x)
    constant_term = (point_y - outgoing_gradient * point_x) ** 2 - 100

    x_minus = (
        -linear_term - sqrt(linear_term**2 - 4 * quadratic_term * constant_term)
    ) / (2 * quadratic_term)
    x_plus = (
        -linear_term + sqrt(linear_term**2 - 4 * quadratic_term * constant_term)
    ) / (2 * quadratic_term)

    # two solutions, one of which is our input point
    next_x = x_minus if isclose(x_plus, point_x) else x_plus
    next_y = point_y + outgoing_gradient * (next_x - point_x)

    return next_x, next_y, outgoing_gradient
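
# Geometry note (an added aside sketching why next_point works): the ellipse is
# 4x**2 + y**2 = 100, so implicit differentiation gives a tangent slope of
# -4x/y and hence a normal slope of m = y/(4x), which is `normal_gradient`.
# A beam with inclination phi reflecting about a normal with inclination theta
# leaves with inclination 2*theta - phi, and
#     tan(2*theta - phi) = (sin(2*theta) - cos(2*theta) * tan(phi))
#                        / (cos(2*theta) + sin(2*theta) * tan(phi)),
# where sin(2*theta) = 2m / (1 + m**2) and cos(2*theta) = (1 - m**2) / (1 + m**2)
# are exactly the s2 and c2 terms above.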

def solution(first_x_coord: float = 1.4, first_y_coord: float = -9.6) -> int:
    """Return the number of times the beam hits the inside of the ellipse
    before exiting through the gap at the top (-0.01 <= x <= 0.01, y > 0)."""
    num_reflections: int = 0
    point_x: float = first_x_coord
    point_y: float = first_y_coord
    gradient: float = (10.1 - point_y) / (0.0 - point_x)

    while not (-0.01 <= point_x <= 0.01 and point_y > 0):
        point_x, point_y, gradient = next_point(point_x, point_y, gradient)
        num_reflections += 1

    return num_reflections


if __name__ == "__main__":
    print(f"{solution() = }")

from collections import OrderedDict
from typing import Mapping

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/data2vec-vision-base-ft": (
        "https://huggingface.co/facebook/data2vec-vision-base-ft/resolve/main/config.json"
    ),
}


class Data2VecVisionConfig(PretrainedConfig):
    model_type = "data2vec-vision"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=224,
        patch_size=16,
        num_channels=3,
        use_mask_token=False,
        use_absolute_position_embeddings=False,
        use_relative_position_bias=False,
        use_shared_relative_position_bias=False,
        layer_scale_init_value=0.1,
        drop_path_rate=0.1,
        use_mean_pooling=True,
        out_indices=[3, 5, 7, 11],
        pool_scales=[1, 2, 3, 6],
        use_auxiliary_head=True,
        auxiliary_loss_weight=0.4,
        auxiliary_channels=256,
        auxiliary_num_convs=1,
        auxiliary_concat_input=False,
        semantic_loss_ignore_index=255,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.use_mask_token = use_mask_token
        self.use_absolute_position_embeddings = use_absolute_position_embeddings
        self.use_relative_position_bias = use_relative_position_bias
        self.use_shared_relative_position_bias = use_shared_relative_position_bias
        self.layer_scale_init_value = layer_scale_init_value
        self.drop_path_rate = drop_path_rate
        self.use_mean_pooling = use_mean_pooling
        # decode head attributes (semantic segmentation)
        self.out_indices = out_indices
        self.pool_scales = pool_scales
        # auxiliary head attributes (semantic segmentation)
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.semantic_loss_ignore_index = semantic_loss_ignore_index


class Data2VecVisionOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
"""simple docstring"""
import subprocess
import sys
from transformers import BertConfig, BertModel, BertTokenizer, pipeline
from transformers.testing_utils import TestCasePlus, require_torch
class UpperCamelCase ( lowercase ):
@require_torch
def _lowercase (self : Union[str, Any]) -> Optional[Any]:
# this test is a bit tricky since TRANSFORMERS_OFFLINE can only be changed before
# `transformers` is loaded, and it's too late for inside pytest - so we are changing it
# while running an external program
# python one-liner segments
# this must be loaded before socket.socket is monkey-patched
__snake_case : Any = '\nfrom transformers import BertConfig, BertModel, BertTokenizer, pipeline\n '
        run = '\nmname = "hf-internal-testing/tiny-random-bert"\nBertConfig.from_pretrained(mname)\nBertModel.from_pretrained(mname)\nBertTokenizer.from_pretrained(mname)\npipe = pipeline(task="fill-mask", model=mname)\nprint("success")\n '
        mock = '\nimport socket\ndef offline_socket(*args, **kwargs): raise RuntimeError("Offline mode is enabled, we shouldn\'t access internet")\nsocket.socket = offline_socket\n '
        # Force fetching the files so that we can use the cache
        mname = 'hf-internal-testing/tiny-random-bert'
        BertConfig.from_pretrained(mname)
        BertModel.from_pretrained(mname)
        BertTokenizer.from_pretrained(mname)
        pipeline(task='fill-mask', model=mname)
        # baseline - just load from_pretrained with normal network
        cmd = [sys.executable, '-c', '\n'.join([load, run, mock])]
        # should succeed
        env = self.get_env()
        # should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
        env['TRANSFORMERS_OFFLINE'] = '1'
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn('success', result.stdout.decode())

    @require_torch
    def test_offline_mode_no_internet(self):
        # python one-liner segments
        # this must be loaded before socket.socket is monkey-patched
        load = '\nfrom transformers import BertConfig, BertModel, BertTokenizer, pipeline\n '
        run = '\nmname = "hf-internal-testing/tiny-random-bert"\nBertConfig.from_pretrained(mname)\nBertModel.from_pretrained(mname)\nBertTokenizer.from_pretrained(mname)\npipe = pipeline(task="fill-mask", model=mname)\nprint("success")\n '
        mock = '\nimport socket\ndef offline_socket(*args, **kwargs): raise socket.error("Faking flaky internet")\nsocket.socket = offline_socket\n '
        # Force fetching the files so that we can use the cache
        mname = 'hf-internal-testing/tiny-random-bert'
        BertConfig.from_pretrained(mname)
        BertModel.from_pretrained(mname)
        BertTokenizer.from_pretrained(mname)
        pipeline(task='fill-mask', model=mname)
        # baseline - just load from_pretrained with normal network
        cmd = [sys.executable, '-c', '\n'.join([load, run, mock])]
        # should succeed
        env = self.get_env()
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn('success', result.stdout.decode())

    @require_torch
    def test_offline_mode_sharded_checkpoint(self):
        # this test is a bit tricky since TRANSFORMERS_OFFLINE can only be changed before
        # `transformers` is loaded, and it's too late for inside pytest - so we are changing it
        # while running an external program
        # python one-liner segments
        # this must be loaded before socket.socket is monkey-patched
        load = '\nfrom transformers import BertConfig, BertModel, BertTokenizer\n '
        run = '\nmname = "hf-internal-testing/tiny-random-bert-sharded"\nBertConfig.from_pretrained(mname)\nBertModel.from_pretrained(mname)\nprint("success")\n '
        mock = '\nimport socket\ndef offline_socket(*args, **kwargs): raise ValueError("Offline mode is enabled")\nsocket.socket = offline_socket\n '
        # baseline - just load from_pretrained with normal network
        cmd = [sys.executable, '-c', '\n'.join([load, run])]
        # should succeed
        env = self.get_env()
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn('success', result.stdout.decode())
        # next emulate no network
        cmd = [sys.executable, '-c', '\n'.join([load, mock, run])]
        # Doesn't fail anymore since the model is in the cache due to other tests, so commenting this.
        # env["TRANSFORMERS_OFFLINE"] = "0"
        # result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        # self.assertEqual(result.returncode, 1, result.stderr)
        # should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
        env['TRANSFORMERS_OFFLINE'] = '1'
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn('success', result.stdout.decode())

    @require_torch
    def test_offline_mode_pipeline_exception(self):
        load = '\nfrom transformers import pipeline\n '
        run = '\nmname = "hf-internal-testing/tiny-random-bert"\npipe = pipeline(model=mname)\n '
        mock = '\nimport socket\ndef offline_socket(*args, **kwargs): raise socket.error("Offline mode is enabled")\nsocket.socket = offline_socket\n '
        env = self.get_env()
        env['TRANSFORMERS_OFFLINE'] = '1'
        cmd = [sys.executable, '-c', '\n'.join([load, mock, run])]
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 1, result.stderr)
        self.assertIn(
            'You cannot infer task automatically within `pipeline` when using offline mode', result.stderr.decode().replace('\n', ''), )

    @require_torch
    def test_offline_mode_custom_model(self):
        load = '\nfrom transformers import AutoModel\n '
        run = '\nmname = "hf-internal-testing/test_dynamic_model"\nAutoModel.from_pretrained(mname, trust_remote_code=True)\nprint("success")\n '
        # baseline - just load from_pretrained with normal network
        cmd = [sys.executable, '-c', '\n'.join([load, run])]
        # should succeed
        env = self.get_env()
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn('success', result.stdout.decode())
        # should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
        env['TRANSFORMERS_OFFLINE'] = '1'
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn('success', result.stdout.decode())
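# All of the tests above lean on one trick: monkey-patch `socket.socket` in a
# child process so that any attempted network access raises. A self-contained
# sketch of that pattern (the example.com request is purely illustrative, and
# this should only be run in a throwaway process since the patch is global):
import socket as _socket_demo
import urllib.request as _urllib_demo

def _offline_socket(*args, **kwargs):
    raise RuntimeError("Offline mode is enabled, we shouldn't access internet")

_socket_demo.socket = _offline_socket  # from here on, new connections fail
try:
    _urllib_demo.urlopen("https://example.com")
except Exception as e:
    print(f"blocked as expected: {e}")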
| 95 | """simple docstring"""
from __future__ import annotations
from math import pow, sqrt
def electrical_impedance(resistance: float, reactance: float, impedance: float) -> dict[str, float]:
    '''Solve Z**2 = R**2 + X**2 for whichever one of the three quantities is passed as 0.'''
    if (resistance, reactance, impedance).count(0) != 1:
        raise ValueError('One and only one argument must be 0')
    if resistance == 0:
        return {"resistance": sqrt(pow(impedance, 2) - pow(reactance, 2))}
    elif reactance == 0:
        return {"reactance": sqrt(pow(impedance, 2) - pow(resistance, 2))}
    elif impedance == 0:
        return {"impedance": sqrt(pow(resistance, 2) + pow(reactance, 2))}
    else:
        raise ValueError('Exactly one argument must be 0')
if __name__ == "__main__":
import doctest
doctest.testmod()
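# Worked example for the solver above: with R = 3 and X = 4, the impedance is
# sqrt(3**2 + 4**2) = 5, so:
# electrical_impedance(3, 4, 0)  ->  {'impedance': 5.0}
# electrical_impedance(0, 4, 5)  ->  {'resistance': 3.0}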
| 95 | 1 |
import numpy as np
def exponential_linear_unit(vector: np.ndarray, alpha: float) -> np.ndarray:
    '''ELU activation: identity for positive inputs, alpha * (exp(x) - 1) otherwise.'''
    return np.where(vector > 0, vector, alpha * (np.exp(vector) - 1))
if __name__ == "__main__":
import doctest
doctest.testmod()
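# Quick check of the activation above: negative inputs are squashed toward
# -alpha, while zero and positive inputs pass through unchanged.
# exponential_linear_unit(np.array([-1.0, 0.0, 2.0]), alpha=1.0)
#   -> approximately [-0.6321, 0.0, 2.0]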
| 12 |
'''simple docstring'''
import datasets
from .nmt_bleu import compute_bleu # From: https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py
UpperCAmelCase_ = '\\n@INPROCEEDINGS{Papineni02bleu:a,\n author = {Kishore Papineni and Salim Roukos and Todd Ward and Wei-jing Zhu},\n title = {BLEU: a Method for Automatic Evaluation of Machine Translation},\n booktitle = {},\n year = {2002},\n pages = {311--318}\n}\n@inproceedings{lin-och-2004-orange,\n title = "{ORANGE}: a Method for Evaluating Automatic Evaluation Metrics for Machine Translation",\n author = "Lin, Chin-Yew and\n Och, Franz Josef",\n booktitle = "{COLING} 2004: Proceedings of the 20th International Conference on Computational Linguistics",\n month = "aug 23{--}aug 27",\n year = "2004",\n address = "Geneva, Switzerland",\n publisher = "COLING",\n url = "https://www.aclweb.org/anthology/C04-1072",\n pages = "501--507",\n}\n'
UpperCAmelCase_ = '\\nBLEU (bilingual evaluation understudy) is an algorithm for evaluating the quality of text which has been machine-translated from one natural language to another.\nQuality is considered to be the correspondence between a machine\'s output and that of a human: "the closer a machine translation is to a professional human translation,\nthe better it is" – this is the central idea behind BLEU. BLEU was one of the first metrics to claim a high correlation with human judgements of quality, and\nremains one of the most popular automated and inexpensive metrics.\n\nScores are calculated for individual translated segments—generally sentences—by comparing them with a set of good quality reference translations.\nThose scores are then averaged over the whole corpus to reach an estimate of the translation\'s overall quality. Intelligibility or grammatical correctness\nare not taken into account[citation needed].\n\nBLEU\'s output is always a number between 0 and 1. This value indicates how similar the candidate text is to the reference texts, with values closer to 1\nrepresenting more similar texts. Few human translations will attain a score of 1, since this would indicate that the candidate is identical to one of the\nreference translations. For this reason, it is not necessary to attain a score of 1. Because there are more opportunities to match, adding additional\nreference translations will increase the BLEU score.\n'
UpperCAmelCase_ = '\nComputes BLEU score of translated segments against one or more references.\nArgs:\n predictions: list of translations to score.\n Each translation should be tokenized into a list of tokens.\n references: list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\n max_order: Maximum n-gram order to use when computing BLEU score.\n smooth: Whether or not to apply Lin et al. 2004 smoothing.\nReturns:\n \'bleu\': bleu score,\n \'precisions\': geometric mean of n-gram precisions,\n \'brevity_penalty\': brevity penalty,\n \'length_ratio\': ratio of lengths,\n \'translation_length\': translation_length,\n \'reference_length\': reference_length\nExamples:\n\n >>> predictions = [\n ... ["hello", "there", "general", "kenobi"], # tokenized prediction of the first sample\n ... ["foo", "bar", "foobar"] # tokenized prediction of the second sample\n ... ]\n >>> references = [\n ... [["hello", "there", "general", "kenobi"], ["hello", "there", "!"]], # tokenized references for the first sample (2 references)\n ... [["foo", "bar", "foobar"]] # tokenized references for the second sample (1 reference)\n ... ]\n >>> bleu = datasets.load_metric("bleu")\n >>> results = bleu.compute(predictions=predictions, references=references)\n >>> print(results["bleu"])\n 1.0\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowerCAmelCase_ ( datasets.Metric ):
'''simple docstring'''
def SCREAMING_SNAKE_CASE__ ( self : int ):
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Sequence(datasets.Value("""string""" , id="""token""" ) , id="""sequence""" ),
"""references""": datasets.Sequence(
datasets.Sequence(datasets.Value("""string""" , id="""token""" ) , id="""sequence""" ) , id="""references""" ),
} ) , codebase_urls=["""https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py"""] , reference_urls=[
"""https://en.wikipedia.org/wiki/BLEU""",
"""https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213""",
] , )
def SCREAMING_SNAKE_CASE__ ( self : int , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : List[Any]=4 , _UpperCAmelCase : Union[str, Any]=False ):
"""simple docstring"""
UpperCAmelCase__ = compute_bleu(
reference_corpus=_UpperCAmelCase , translation_corpus=_UpperCAmelCase , max_order=_UpperCAmelCase , smooth=_UpperCAmelCase )
((UpperCAmelCase__) , (UpperCAmelCase__) , (UpperCAmelCase__) , (UpperCAmelCase__) , (UpperCAmelCase__) , (UpperCAmelCase__)) = score
return {
"bleu": bleu,
"precisions": precisions,
"brevity_penalty": bp,
"length_ratio": ratio,
"translation_length": translation_length,
"reference_length": reference_length,
}
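# The vendored scorer can also be called directly; its return layout is the
# 6-tuple unpacked in `_compute` above. A sketch with tokenized inputs:
# score = compute_bleu(
#     reference_corpus=[[["hello", "there", "general", "kenobi"]]],
#     translation_corpus=[["hello", "there", "general", "kenobi"]],
#     max_order=4,
#     smooth=False,
# )
# bleu, precisions, bp, ratio, translation_length, reference_length = score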
| 346 | 0 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.whisper import WhisperForConditionalGeneration, WhisperProcessor
from .base import PipelineTool
class SpeechToTextTool(PipelineTool):
    default_checkpoint = "openai/whisper-base"
    description = (
        "This is a tool that transcribes an audio into text. It takes an input named `audio` and returns the "
        "transcribed text."
    )
    name = "transcriber"
    pre_processor_class = WhisperProcessor
    model_class = WhisperForConditionalGeneration
    inputs = ["audio"]
    outputs = ["text"]

    def encode(self, audio):
        return self.pre_processor(audio, return_tensors='pt').input_features

    def forward(self, inputs):
        return self.model.generate(inputs=inputs)

    def decode(self, outputs):
        return self.pre_processor.batch_decode(outputs, skip_special_tokens=True)[0]
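# Hedged usage sketch for the tool above (the 16 kHz mono waveform framing is
# an assumption for illustration; PipelineTool.__call__ chains
# encode -> forward -> decode):
# import numpy as np
# tool = SpeechToTextTool()
# waveform = np.zeros(16000, dtype=np.float32)  # one second of silence
# print(tool(waveform))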
| 247 |
import qiskit
def two_qubit_measure(num_qubits: int, num_classical_bits: int) -> qiskit.result.counts.Counts:
    simulator = qiskit.Aer.get_backend('aer_simulator')
    # Create a Quantum Circuit acting on the q register
    circuit = qiskit.QuantumCircuit(num_qubits, num_classical_bits)
    # Apply X (NOT) Gate to Qubits 0 & 1
    circuit.x(0)
    circuit.x(1)
    # Map the quantum measurement to the classical bits
    circuit.measure([0, 1], [0, 1])
    # Execute the circuit on the qasm simulator
    job = qiskit.execute(circuit, simulator, shots=1000)
    # Return the histogram data of the results of the experiment.
    return job.result().get_counts(circuit)

if __name__ == "__main__":
    counts = two_qubit_measure(2, 2)
    print(F'''Total count for various states are: {counts}''')
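# With both qubits flipped by the X gates, the circuit above is deterministic:
# every shot should collapse to the |11> state, so (simulator permitting)
# counts = two_qubit_measure(2, 2)
# assert counts == {'11': 1000}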
| 247 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
UpperCamelCase_ = {
"configuration_funnel": ["FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP", "FunnelConfig"],
"convert_funnel_original_tf_checkpoint_to_pytorch": [],
"tokenization_funnel": ["FunnelTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = ["FunnelTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = [
"FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST",
"FunnelBaseModel",
"FunnelForMaskedLM",
"FunnelForMultipleChoice",
"FunnelForPreTraining",
"FunnelForQuestionAnswering",
"FunnelForSequenceClassification",
"FunnelForTokenClassification",
"FunnelModel",
"FunnelPreTrainedModel",
"load_tf_weights_in_funnel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = [
"TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFFunnelBaseModel",
"TFFunnelForMaskedLM",
"TFFunnelForMultipleChoice",
"TFFunnelForPreTraining",
"TFFunnelForQuestionAnswering",
"TFFunnelForSequenceClassification",
"TFFunnelForTokenClassification",
"TFFunnelModel",
"TFFunnelPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_funnel import FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP, FunnelConfig
from .tokenization_funnel import FunnelTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_funnel_fast import FunnelTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_funnel import (
FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
FunnelBaseModel,
FunnelForMaskedLM,
FunnelForMultipleChoice,
FunnelForPreTraining,
FunnelForQuestionAnswering,
FunnelForSequenceClassification,
FunnelForTokenClassification,
FunnelModel,
FunnelPreTrainedModel,
load_tf_weights_in_funnel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_funnel import (
TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFFunnelBaseModel,
TFFunnelForMaskedLM,
TFFunnelForMultipleChoice,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForSequenceClassification,
TFFunnelForTokenClassification,
TFFunnelModel,
TFFunnelPreTrainedModel,
)
else:
import sys
UpperCamelCase_ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
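# The structure above keeps `import transformers` cheap: heavy submodules only
# load on first attribute access. A minimal standalone sketch of the same idea
# (simplified; not the real _LazyModule, which also handles __dir__, pickling
# and module specs):
import importlib
import types

class LazyModule(types.ModuleType):
    """Resolve attributes through the import structure on first access."""
    def __init__(self, name, import_structure):
        super().__init__(name)
        self._import_structure = import_structure
    def __getattr__(self, attr):
        for submodule, symbols in self._import_structure.items():
            if attr in symbols:
                module = importlib.import_module(f"{self.__name__}.{submodule}")
                return getattr(module, attr)
        raise AttributeError(attr)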
| 251 |
'''simple docstring'''
import logging
import math
from functools import partial
from typing import Any, Callable, Dict, Iterable, List, Optional, Sequence, Tuple, Union
import torch
from .tensor_utils import tensor_tree_map, tree_map
def lowercase__( __UpperCamelCase: Union[dict, list, tuple, torch.Tensor] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Tuple = []
if isinstance(__UpperCamelCase ,__UpperCamelCase ):
for v in tree.values():
shapes.extend(_fetch_dims(__UpperCamelCase ) )
elif isinstance(__UpperCamelCase ,(list, tuple) ):
for t in tree:
shapes.extend(_fetch_dims(__UpperCamelCase ) )
elif isinstance(__UpperCamelCase ,torch.Tensor ):
shapes.append(tree.shape )
else:
raise ValueError('Not supported' )
return shapes
@torch.jit.ignore
def lowercase__( __UpperCamelCase: int ,__UpperCamelCase: Tuple[int, ...] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[int] = []
for d in reversed(__UpperCamelCase ):
idx.append(flat_idx % d )
SCREAMING_SNAKE_CASE : Tuple = flat_idx // d
return tuple(reversed(__UpperCamelCase ) )
@torch.jit.ignore
def lowercase__( __UpperCamelCase: Sequence[int] ,__UpperCamelCase: Sequence[int] ,__UpperCamelCase: Sequence[int] ,__UpperCamelCase: Optional[Sequence[bool]] = None ,__UpperCamelCase: Optional[Sequence[bool]] = None ,):
"""simple docstring"""
def reduce_edge_list(__UpperCamelCase: List[bool] ) -> None:
SCREAMING_SNAKE_CASE : List[str] = True
for i in range(len(__UpperCamelCase ) ):
SCREAMING_SNAKE_CASE : Optional[Any] = -1 * (i + 1)
l[reversed_idx] &= tally
SCREAMING_SNAKE_CASE : str = l[reversed_idx]
if start_edges is None:
SCREAMING_SNAKE_CASE : int = [s == 0 for s in start]
reduce_edge_list(__UpperCamelCase )
if end_edges is None:
SCREAMING_SNAKE_CASE : Tuple = [e == (d - 1) for e, d in zip(__UpperCamelCase ,__UpperCamelCase )]
reduce_edge_list(__UpperCamelCase )
# Base cases. Either start/end are empty and we're done, or the final,
# one-dimensional tensor can be simply sliced
if len(__UpperCamelCase ) == 0:
return [()]
elif len(__UpperCamelCase ) == 1:
return [(slice(start[0] ,end[0] + 1 ),)]
SCREAMING_SNAKE_CASE : List[Tuple[slice, ...]] = []
SCREAMING_SNAKE_CASE : List[slice] = []
# Dimensions common to start and end can be selected directly
for s, e in zip(__UpperCamelCase ,__UpperCamelCase ):
if s == e:
path_list.append(slice(__UpperCamelCase ,s + 1 ) )
else:
break
SCREAMING_SNAKE_CASE : Tuple[slice, ...] = tuple(__UpperCamelCase )
SCREAMING_SNAKE_CASE : Optional[Any] = len(__UpperCamelCase )
# start == end, and we're done
if divergence_idx == len(__UpperCamelCase ):
return [path]
def upper() -> Tuple[Tuple[slice, ...], ...]:
assert start_edges is not None
assert end_edges is not None
SCREAMING_SNAKE_CASE : List[str] = start[divergence_idx]
return tuple(
path + (slice(__UpperCamelCase ,sdi + 1 ),) + s
for s in _get_minimal_slice_set(
start[divergence_idx + 1 :] ,[d - 1 for d in dims[divergence_idx + 1 :]] ,dims[divergence_idx + 1 :] ,start_edges=start_edges[divergence_idx + 1 :] ,end_edges=[True for _ in end_edges[divergence_idx + 1 :]] ,) )
def lower() -> Tuple[Tuple[slice, ...], ...]:
assert start_edges is not None
assert end_edges is not None
SCREAMING_SNAKE_CASE : List[Any] = end[divergence_idx]
return tuple(
path + (slice(__UpperCamelCase ,edi + 1 ),) + s
for s in _get_minimal_slice_set(
[0 for _ in start[divergence_idx + 1 :]] ,end[divergence_idx + 1 :] ,dims[divergence_idx + 1 :] ,start_edges=[True for _ in start_edges[divergence_idx + 1 :]] ,end_edges=end_edges[divergence_idx + 1 :] ,) )
# If both start and end are at the edges of the subtree rooted at
# divergence_idx, we can just select the whole subtree at once
if start_edges[divergence_idx] and end_edges[divergence_idx]:
slices.append(path + (slice(start[divergence_idx] ,end[divergence_idx] + 1 ),) )
# If just start is at the edge, we can grab almost all of the subtree,
# treating only the ragged bottom edge as an edge case
elif start_edges[divergence_idx]:
slices.append(path + (slice(start[divergence_idx] ,end[divergence_idx] ),) )
slices.extend(lower() )
# Analogous to the previous case, but the top is ragged this time
elif end_edges[divergence_idx]:
slices.extend(upper() )
slices.append(path + (slice(start[divergence_idx] + 1 ,end[divergence_idx] + 1 ),) )
# If both sides of the range are ragged, we need to handle both sides
# separately. If there's contiguous meat in between them, we can index it
# in one big chunk
else:
slices.extend(upper() )
SCREAMING_SNAKE_CASE : List[str] = end[divergence_idx] - start[divergence_idx]
if middle_ground > 1:
slices.append(path + (slice(start[divergence_idx] + 1 ,end[divergence_idx] ),) )
slices.extend(lower() )
return slices
@torch.jit.ignore
def lowercase__( __UpperCamelCase: torch.Tensor ,__UpperCamelCase: int ,__UpperCamelCase: int ,__UpperCamelCase: int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[Any] = t.shape[:no_batch_dims]
SCREAMING_SNAKE_CASE : str = list(_flat_idx_to_idx(__UpperCamelCase ,__UpperCamelCase ) )
# _get_minimal_slice_set is inclusive
SCREAMING_SNAKE_CASE : int = list(_flat_idx_to_idx(flat_end - 1 ,__UpperCamelCase ) )
# Get an ordered list of slices to perform
SCREAMING_SNAKE_CASE : str = _get_minimal_slice_set(
__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,)
SCREAMING_SNAKE_CASE : Dict = [t[s] for s in slices]
return torch.cat([s.view((-1,) + t.shape[no_batch_dims:] ) for s in sliced_tensors] )
def lowercase__( __UpperCamelCase: Callable ,__UpperCamelCase: Dict[str, Any] ,__UpperCamelCase: int ,__UpperCamelCase: int ,__UpperCamelCase: bool = False ,__UpperCamelCase: Any = None ,__UpperCamelCase: bool = False ,):
"""simple docstring"""
if not (len(__UpperCamelCase ) > 0):
raise ValueError('Must provide at least one input' )
SCREAMING_SNAKE_CASE : List[Any] = [shape[:no_batch_dims] for shape in _fetch_dims(__UpperCamelCase )]
SCREAMING_SNAKE_CASE : Optional[int] = tuple([max(__UpperCamelCase ) for s in zip(*__UpperCamelCase )] )
def _prep_inputs(__UpperCamelCase: torch.Tensor ) -> torch.Tensor:
if not low_mem:
if not sum(t.shape[:no_batch_dims] ) == no_batch_dims:
SCREAMING_SNAKE_CASE : str = t.expand(orig_batch_dims + t.shape[no_batch_dims:] )
SCREAMING_SNAKE_CASE : str = t.reshape(-1 ,*t.shape[no_batch_dims:] )
else:
SCREAMING_SNAKE_CASE : Union[str, Any] = t.expand(orig_batch_dims + t.shape[no_batch_dims:] )
return t
SCREAMING_SNAKE_CASE : Dict[str, Any] = tensor_tree_map(_prep_inputs ,__UpperCamelCase )
SCREAMING_SNAKE_CASE : Union[str, Any] = None
if _out is not None:
SCREAMING_SNAKE_CASE : Tuple = tensor_tree_map(lambda __UpperCamelCase : t.view([-1] + list(t.shape[no_batch_dims:] ) ) ,_out )
SCREAMING_SNAKE_CASE : List[Any] = 1
for d in orig_batch_dims:
flat_batch_dim *= d
SCREAMING_SNAKE_CASE : int = flat_batch_dim // chunk_size + (flat_batch_dim % chunk_size != 0)
def _select_chunk(__UpperCamelCase: torch.Tensor ) -> torch.Tensor:
return t[i : i + chunk_size] if t.shape[0] != 1 else t
SCREAMING_SNAKE_CASE : Union[str, Any] = 0
SCREAMING_SNAKE_CASE : List[str] = prepped_outputs
for _ in range(__UpperCamelCase ):
# Chunk the input
if not low_mem:
SCREAMING_SNAKE_CASE : List[str] = _select_chunk
else:
SCREAMING_SNAKE_CASE : Union[str, Any] = partial(
_chunk_slice ,flat_start=__UpperCamelCase ,flat_end=min(__UpperCamelCase ,i + chunk_size ) ,no_batch_dims=len(__UpperCamelCase ) ,)
SCREAMING_SNAKE_CASE : Dict[str, Any] = tensor_tree_map(__UpperCamelCase ,__UpperCamelCase )
# Run the layer on the chunk
SCREAMING_SNAKE_CASE : int = layer(**__UpperCamelCase )
# Allocate space for the output
if out is None:
SCREAMING_SNAKE_CASE : List[Any] = tensor_tree_map(lambda __UpperCamelCase : t.new_zeros((flat_batch_dim,) + t.shape[1:] ) ,__UpperCamelCase )
# Put the chunk in its pre-allocated space
if isinstance(__UpperCamelCase ,__UpperCamelCase ):
def assign(__UpperCamelCase: dict ,__UpperCamelCase: dict ) -> None:
for k, v in da.items():
if isinstance(__UpperCamelCase ,__UpperCamelCase ):
assign(__UpperCamelCase ,da[k] )
else:
if _add_into_out:
v[i : i + chunk_size] += da[k]
else:
SCREAMING_SNAKE_CASE : Union[str, Any] = da[k]
assign(__UpperCamelCase ,__UpperCamelCase )
elif isinstance(__UpperCamelCase ,__UpperCamelCase ):
for xa, xa in zip(__UpperCamelCase ,__UpperCamelCase ):
if _add_into_out:
xa[i : i + chunk_size] += xa
else:
SCREAMING_SNAKE_CASE : List[str] = xa
elif isinstance(__UpperCamelCase ,torch.Tensor ):
if _add_into_out:
out[i : i + chunk_size] += output_chunk
else:
SCREAMING_SNAKE_CASE : Optional[int] = output_chunk
else:
raise ValueError('Not supported' )
i += chunk_size
SCREAMING_SNAKE_CASE : Any = tensor_tree_map(lambda __UpperCamelCase : t.view(orig_batch_dims + t.shape[1:] ) ,__UpperCamelCase )
return out
class _a :
'''simple docstring'''
def __init__( self, A = 512, ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = max_chunk_size
SCREAMING_SNAKE_CASE : Optional[int] = None
SCREAMING_SNAKE_CASE : Optional[tuple] = None
def UpperCamelCase_ ( self, A, A, A ):
'''simple docstring'''
logging.info('Tuning chunk size...' )
if min_chunk_size >= self.max_chunk_size:
return min_chunk_size
SCREAMING_SNAKE_CASE : List[int] = [2**l for l in range(int(math.log(self.max_chunk_size, 2 ) ) + 1 )]
SCREAMING_SNAKE_CASE : List[str] = [c for c in candidates if c > min_chunk_size]
SCREAMING_SNAKE_CASE : Optional[int] = [min_chunk_size] + candidates
candidates[-1] += 4
def test_chunk_size(A ) -> bool:
try:
with torch.no_grad():
fn(*A, chunk_size=A )
return True
except RuntimeError:
return False
SCREAMING_SNAKE_CASE : Any = 0
SCREAMING_SNAKE_CASE : Union[str, Any] = len(A ) - 1
while i > min_viable_chunk_size_index:
SCREAMING_SNAKE_CASE : Any = test_chunk_size(candidates[i] )
if not viable:
SCREAMING_SNAKE_CASE : List[Any] = (min_viable_chunk_size_index + i) // 2
else:
SCREAMING_SNAKE_CASE : Optional[Any] = i
SCREAMING_SNAKE_CASE : List[Any] = (i + len(A ) - 1) // 2
return candidates[min_viable_chunk_size_index]
def UpperCamelCase_ ( self, A, A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = True
for aa, aa in zip(A, A ):
assert type(A ) == type(A )
if isinstance(A, (list, tuple) ):
consistent &= self._compare_arg_caches(A, A )
elif isinstance(A, A ):
SCREAMING_SNAKE_CASE : Optional[Any] = [v for _, v in sorted(aa.items(), key=lambda A : x[0] )]
SCREAMING_SNAKE_CASE : Optional[int] = [v for _, v in sorted(aa.items(), key=lambda A : x[0] )]
consistent &= self._compare_arg_caches(A, A )
else:
consistent &= aa == aa
return consistent
def UpperCamelCase_ ( self, A, A, A, ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = True
SCREAMING_SNAKE_CASE : tuple = tree_map(lambda A : a.shape if isinstance(A, torch.Tensor ) else a, A, A )
if self.cached_arg_data is not None:
# If args have changed shape/value, we need to re-tune
assert len(self.cached_arg_data ) == len(A )
SCREAMING_SNAKE_CASE : str = self._compare_arg_caches(self.cached_arg_data, A )
else:
# Otherwise, we can reuse the precomputed value
SCREAMING_SNAKE_CASE : Union[str, Any] = False
if not consistent:
SCREAMING_SNAKE_CASE : Any = self._determine_favorable_chunk_size(
A, A, A, )
SCREAMING_SNAKE_CASE : Dict = arg_data
assert self.cached_chunk_size is not None
return self.cached_chunk_size
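# A self-contained sketch of the chunking idea implemented above (its upstream
# name is `chunk_layer`): flatten the leading batch dims, run the layer on
# fixed-size slices, and stitch the outputs back together. This is a simplified
# re-implementation for illustration only - it skips the dict/tuple output
# handling and the low-memory slicing path.
def _toy_chunked_apply(layer, x, chunk_size, no_batch_dims):
    flat = x.reshape(-1, *x.shape[no_batch_dims:])  # merge the batch dims
    pieces = [layer(flat[i : i + chunk_size]) for i in range(0, flat.shape[0], chunk_size)]
    stacked = torch.cat(pieces, dim=0)
    return stacked.reshape(*x.shape[:no_batch_dims], *stacked.shape[1:])

_demo = _toy_chunked_apply(lambda t: t * 2, torch.randn(6, 4, 8), chunk_size=5, no_batch_dims=2)
assert _demo.shape == (6, 4, 8)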
| 251 | 1 |
import random
import unittest
import torch
from diffusers import IFInpaintingSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class A ( __lowercase , __lowercase , unittest.TestCase ):
__UpperCAmelCase : Any = IFInpaintingSuperResolutionPipeline
__UpperCAmelCase : Tuple = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {'''width''', '''height'''}
__UpperCAmelCase : str = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS.union({'original_image'} )
__UpperCAmelCase : List[str] = PipelineTesterMixin.required_optional_params - {'''latents'''}
def lowercase_ (self : Any ) -> Dict:
"""simple docstring"""
return self._get_superresolution_dummy_components()
def lowercase_ (self : List[str] , __UpperCAmelCase : Union[str, Any] , __UpperCAmelCase : List[str]=0 ) -> Tuple:
"""simple docstring"""
if str(UpperCAmelCase__ ).startswith("mps" ):
UpperCAmelCase__ = torch.manual_seed(UpperCAmelCase__ )
else:
UpperCAmelCase__ = torch.Generator(device=UpperCAmelCase__ ).manual_seed(UpperCAmelCase__ )
UpperCAmelCase__ = floats_tensor((1, 3, 1_6, 1_6) , rng=random.Random(UpperCAmelCase__ ) ).to(UpperCAmelCase__ )
UpperCAmelCase__ = floats_tensor((1, 3, 3_2, 3_2) , rng=random.Random(UpperCAmelCase__ ) ).to(UpperCAmelCase__ )
UpperCAmelCase__ = floats_tensor((1, 3, 3_2, 3_2) , rng=random.Random(UpperCAmelCase__ ) ).to(UpperCAmelCase__ )
UpperCAmelCase__ = {
"prompt": "A painting of a squirrel eating a burger",
"image": image,
"original_image": original_image,
"mask_image": mask_image,
"generator": generator,
"num_inference_steps": 2,
"output_type": "numpy",
}
return inputs
@unittest.skipIf(
torch_device != "cuda" or not is_xformers_available() , reason="XFormers attention is only available with CUDA and `xformers` installed" , )
def lowercase_ (self : Optional[Any] ) -> List[str]:
"""simple docstring"""
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 )
def lowercase_ (self : Dict ) -> Tuple:
"""simple docstring"""
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != "cuda" , reason="float16 requires CUDA" )
def lowercase_ (self : Any ) -> int:
"""simple docstring"""
super().test_save_load_floataa(expected_max_diff=1E-1 )
def lowercase_ (self : List[Any] ) -> Dict:
"""simple docstring"""
self._test_attention_slicing_forward_pass(expected_max_diff=1E-2 )
def lowercase_ (self : Union[str, Any] ) -> Any:
"""simple docstring"""
self._test_save_load_local()
def lowercase_ (self : Tuple ) -> str:
"""simple docstring"""
self._test_inference_batch_single_identical(
        expected_max_diff=1E-2 , )
| 361 | from __future__ import annotations
def is_palindrome(n) -> bool:
    '''Return True if ``n`` reads the same forwards and backwards.'''
    s = str(n)
    return s == s[::-1]

def solution(limit: int = 1_000_000) -> int:
    '''Sum all numbers below ``limit`` that are palindromic in base 10 and base 2.'''
    total = 0
    for i in range(1, limit):
        if is_palindrome(i) and is_palindrome(bin(i).split("b")[1]):
            total += i
    return total

if __name__ == "__main__":
    print(solution(int(input().strip())))
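# Example from the double-base palindrome problem: 585 is a decimal palindrome
# and its binary form 1001001001 is also palindromic, so it contributes to the
# sum; 12 (binary 1100) does not.
# assert is_palindrome(585) and is_palindrome(bin(585).split("b")[1])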
| 143 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_lowerCAmelCase : Dict = {
"configuration_roberta_prelayernorm": [
"ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP",
"RobertaPreLayerNormConfig",
"RobertaPreLayerNormOnnxConfig",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase : Dict = [
"ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST",
"RobertaPreLayerNormForCausalLM",
"RobertaPreLayerNormForMaskedLM",
"RobertaPreLayerNormForMultipleChoice",
"RobertaPreLayerNormForQuestionAnswering",
"RobertaPreLayerNormForSequenceClassification",
"RobertaPreLayerNormForTokenClassification",
"RobertaPreLayerNormModel",
"RobertaPreLayerNormPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase : Optional[int] = [
"TF_ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFRobertaPreLayerNormForCausalLM",
"TFRobertaPreLayerNormForMaskedLM",
"TFRobertaPreLayerNormForMultipleChoice",
"TFRobertaPreLayerNormForQuestionAnswering",
"TFRobertaPreLayerNormForSequenceClassification",
"TFRobertaPreLayerNormForTokenClassification",
"TFRobertaPreLayerNormMainLayer",
"TFRobertaPreLayerNormModel",
"TFRobertaPreLayerNormPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase : List[str] = [
"FlaxRobertaPreLayerNormForCausalLM",
"FlaxRobertaPreLayerNormForMaskedLM",
"FlaxRobertaPreLayerNormForMultipleChoice",
"FlaxRobertaPreLayerNormForQuestionAnswering",
"FlaxRobertaPreLayerNormForSequenceClassification",
"FlaxRobertaPreLayerNormForTokenClassification",
"FlaxRobertaPreLayerNormModel",
"FlaxRobertaPreLayerNormPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_roberta_prelayernorm import (
ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP,
RobertaPreLayerNormConfig,
RobertaPreLayerNormOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roberta_prelayernorm import (
ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST,
RobertaPreLayerNormForCausalLM,
RobertaPreLayerNormForMaskedLM,
RobertaPreLayerNormForMultipleChoice,
RobertaPreLayerNormForQuestionAnswering,
RobertaPreLayerNormForSequenceClassification,
RobertaPreLayerNormForTokenClassification,
RobertaPreLayerNormModel,
RobertaPreLayerNormPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roberta_prelayernorm import (
TF_ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRobertaPreLayerNormForCausalLM,
TFRobertaPreLayerNormForMaskedLM,
TFRobertaPreLayerNormForMultipleChoice,
TFRobertaPreLayerNormForQuestionAnswering,
TFRobertaPreLayerNormForSequenceClassification,
TFRobertaPreLayerNormForTokenClassification,
TFRobertaPreLayerNormMainLayer,
TFRobertaPreLayerNormModel,
TFRobertaPreLayerNormPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roberta_prelayernorm import (
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormModel,
FlaxRobertaPreLayerNormPreTrainedModel,
)
else:
import sys
_lowerCAmelCase : str = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 218 |
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class BridgeTowerProcessor(ProcessorMixin):
    attributes = ['image_processor', 'tokenizer']
    image_processor_class = 'BridgeTowerImageProcessor'
    tokenizer_class = ('RobertaTokenizer', 'RobertaTokenizerFast')

    def __init__(self, image_processor, tokenizer):
        super().__init__(image_processor, tokenizer)

    def __call__(self, images, text=None, add_special_tokens=True, padding=False, truncation=None, max_length=None, stride=0, pad_to_multiple_of=None, return_token_type_ids=None, return_attention_mask=None, return_overflowing_tokens=False, return_special_tokens_mask=False, return_offsets_mapping=False, return_length=False, verbose=True, return_tensors=None, **kwargs) -> BatchEncoding:
        encoding = self.tokenizer(
            text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation,
            max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of,
            return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask,
            return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask,
            return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose,
            return_tensors=return_tensors, **kwargs,
        )
        # add pixel_values + pixel_mask
        encoding_image_processor = self.image_processor(
            images, return_tensors=return_tensors, do_normalize=True, do_center_crop=True, **kwargs
        )
        encoding.update(encoding_image_processor)
        return encoding

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
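# Hedged usage sketch for the processor above (the checkpoint id and image
# file are assumptions for illustration):
# from PIL import Image
# processor = BridgeTowerProcessor.from_pretrained("BridgeTower/bridgetower-base")
# inputs = processor(images=Image.open("cat.png"), text="a photo of a cat", return_tensors="pt")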
| 218 | 1 |
import argparse
import os
import torch
from transformers.utils import WEIGHTS_NAME
DIALOGPT_MODELS = ['small', 'medium', 'large']
OLD_KEY = 'lm_head.decoder.weight'
NEW_KEY = 'lm_head.weight'

def convert_dialogpt_checkpoint(checkpoint_path: str, pytorch_dump_folder_path: str):
    d = torch.load(checkpoint_path)
    d[NEW_KEY] = d.pop(OLD_KEY)
    os.makedirs(pytorch_dump_folder_path, exist_ok=True)
    torch.save(d, os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME))

if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--dialogpt_path", default=".", type=str)
    args = parser.parse_args()
    for MODEL in DIALOGPT_MODELS:
        checkpoint_path = os.path.join(args.dialogpt_path, F'{MODEL}_ft.pkl')
        pytorch_dump_folder_path = F'./DialoGPT-{MODEL}'
        convert_dialogpt_checkpoint(
            checkpoint_path,
            pytorch_dump_folder_path,
        )
| 367 |
from __future__ import annotations
def peak(lst: list[int]) -> int:
    '''Return a peak of a unimodal (increasing, then decreasing) list in O(log n).'''
    m = len(lst) // 2
    # choose the middle 3 elements
    three = lst[m - 1 : m + 2]
    # if middle element is peak
    if three[1] > three[0] and three[1] > three[2]:
        return three[1]
    # if increasing, recurse on right
    elif three[0] < three[2]:
        if len(lst[:m]) == 2:
            m -= 1
        return peak(lst[m:])
    # decreasing
    else:
        if len(lst[:m]) == 2:
            m += 1
        return peak(lst[:m])
if __name__ == "__main__":
import doctest
doctest.testmod()
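# Example: in the unimodal list below the middle window [4, 5, 4] is hit on
# the first call, so the function returns 5 immediately.
# peak([1, 2, 3, 4, 5, 4, 3, 2, 1])  ->  5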
| 10 | 0 |
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DeformableDetrImageProcessor
class __lowerCAmelCase ( unittest.TestCase):
def __init__( self , lowerCAmelCase__ , lowerCAmelCase__=7 , lowerCAmelCase__=3 , lowerCAmelCase__=3_0 , lowerCAmelCase__=4_0_0 , lowerCAmelCase__=True , lowerCAmelCase__=None , lowerCAmelCase__=True , lowerCAmelCase__=[0.5, 0.5, 0.5] , lowerCAmelCase__=[0.5, 0.5, 0.5] , lowerCAmelCase__=True , lowerCAmelCase__=1 / 2_5_5 , lowerCAmelCase__=True , ) -> str:
'''simple docstring'''
a__ : str =size if size is not None else {"shortest_edge": 1_8, "longest_edge": 1_3_3_3}
a__ : Tuple =parent
a__ : Any =batch_size
a__ : int =num_channels
a__ : List[str] =min_resolution
a__ : Tuple =max_resolution
a__ : Tuple =do_resize
a__ : Dict =size
a__ : List[str] =do_normalize
a__ : Optional[Any] =image_mean
a__ : Tuple =image_std
a__ : Dict =do_rescale
a__ : List[Any] =rescale_factor
a__ : Optional[Any] =do_pad
def _lowercase ( self ) -> Any:
'''simple docstring'''
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
def _lowercase ( self , lowerCAmelCase__ , lowerCAmelCase__=False ) -> Any:
'''simple docstring'''
if not batched:
a__ : Tuple =image_inputs[0]
if isinstance(lowerCAmelCase__ , Image.Image ):
a__ , a__ : Union[str, Any] =image.size
else:
a__ , a__ : Optional[int] =image.shape[1], image.shape[2]
if w < h:
a__ : Optional[int] =int(self.size["shortest_edge"] * h / w )
a__ : List[Any] =self.size["shortest_edge"]
elif w > h:
a__ : Dict =self.size["shortest_edge"]
a__ : List[Any] =int(self.size["shortest_edge"] * w / h )
else:
a__ : List[Any] =self.size["shortest_edge"]
a__ : int =self.size["shortest_edge"]
else:
a__ : List[Any] =[]
for image in image_inputs:
a__ , a__ : Optional[int] =self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
a__ : Any =max(lowerCAmelCase__ , key=lambda lowerCAmelCase__ : item[0] )[0]
a__ : Union[str, Any] =max(lowerCAmelCase__ , key=lambda lowerCAmelCase__ : item[1] )[1]
return expected_height, expected_width
@require_torch
@require_vision
class __lowerCAmelCase ( UpperCamelCase__ , unittest.TestCase):
_lowercase : Tuple = DeformableDetrImageProcessor if is_vision_available() else None
def _lowercase ( self ) -> Tuple:
'''simple docstring'''
a__ : List[str] =DeformableDetrImageProcessingTester(self )
@property
def _lowercase ( self ) -> Dict:
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def _lowercase ( self ) -> Tuple:
'''simple docstring'''
a__ : Optional[Any] =self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(lowerCAmelCase__ , "image_mean" ) )
self.assertTrue(hasattr(lowerCAmelCase__ , "image_std" ) )
self.assertTrue(hasattr(lowerCAmelCase__ , "do_normalize" ) )
self.assertTrue(hasattr(lowerCAmelCase__ , "do_resize" ) )
self.assertTrue(hasattr(lowerCAmelCase__ , "do_rescale" ) )
self.assertTrue(hasattr(lowerCAmelCase__ , "do_pad" ) )
self.assertTrue(hasattr(lowerCAmelCase__ , "size" ) )
def _lowercase ( self ) -> str:
'''simple docstring'''
a__ : Optional[int] =self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"shortest_edge": 1_8, "longest_edge": 1_3_3_3} )
self.assertEqual(image_processor.do_pad , lowerCAmelCase__ )
a__ : List[str] =self.image_processing_class.from_dict(
self.image_processor_dict , size=4_2 , max_size=8_4 , pad_and_return_pixel_mask=lowerCAmelCase__ )
self.assertEqual(image_processor.size , {"shortest_edge": 4_2, "longest_edge": 8_4} )
self.assertEqual(image_processor.do_pad , lowerCAmelCase__ )
def _lowercase ( self ) -> Dict:
'''simple docstring'''
pass
def _lowercase ( self ) -> Tuple:
'''simple docstring'''
a__ : Optional[int] =self.image_processing_class(**self.image_processor_dict )
# create random PIL images
a__ : List[Any] =prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase__ )
for image in image_inputs:
self.assertIsInstance(lowerCAmelCase__ , Image.Image )
# Test not batched input
a__ : List[Any] =image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
a__ , a__ : List[Any] =self.image_processor_tester.get_expected_values(lowerCAmelCase__ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
a__ , a__ : List[str] =self.image_processor_tester.get_expected_values(lowerCAmelCase__ , batched=lowerCAmelCase__ )
a__ : Optional[int] =image_processing(lowerCAmelCase__ , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def _lowercase ( self ) -> Optional[Any]:
'''simple docstring'''
a__ : List[Any] =self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
a__ : int =prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase__ , numpify=lowerCAmelCase__ )
for image in image_inputs:
self.assertIsInstance(lowerCAmelCase__ , np.ndarray )
# Test not batched input
a__ : Union[str, Any] =image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
a__ , a__ : Dict =self.image_processor_tester.get_expected_values(lowerCAmelCase__ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
a__ : Union[str, Any] =image_processing(lowerCAmelCase__ , return_tensors="pt" ).pixel_values
a__ , a__ : List[str] =self.image_processor_tester.get_expected_values(lowerCAmelCase__ , batched=lowerCAmelCase__ )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def _lowercase ( self ) -> int:
'''simple docstring'''
a__ : List[str] =self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
a__ : Union[str, Any] =prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase__ , torchify=lowerCAmelCase__ )
for image in image_inputs:
self.assertIsInstance(lowerCAmelCase__ , torch.Tensor )
# Test not batched input
a__ : int =image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
a__ , a__ : List[Any] =self.image_processor_tester.get_expected_values(lowerCAmelCase__ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
a__ : Union[str, Any] =image_processing(lowerCAmelCase__ , return_tensors="pt" ).pixel_values
a__ , a__ : Tuple =self.image_processor_tester.get_expected_values(lowerCAmelCase__ , batched=lowerCAmelCase__ )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
@slow
def _lowercase ( self ) -> Optional[int]:
'''simple docstring'''
a__ : int =Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt" , "r" ) as f:
a__ : Optional[int] =json.loads(f.read() )
a__ : Any ={"image_id": 3_9_7_6_9, "annotations": target}
# encode them
a__ : Union[str, Any] =DeformableDetrImageProcessor()
a__ : Tuple =image_processing(images=lowerCAmelCase__ , annotations=lowerCAmelCase__ , return_tensors="pt" )
# verify pixel values
a__ : Tuple =torch.Size([1, 3, 8_0_0, 1_0_6_6] )
self.assertEqual(encoding["pixel_values"].shape , lowerCAmelCase__ )
a__ : Any =torch.tensor([0.27_96, 0.31_38, 0.34_81] )
self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3] , lowerCAmelCase__ , atol=1E-4 ) )
# verify area
a__ : List[Any] =torch.tensor([58_87.96_00, 1_12_50.20_61, 48_93_53.84_38, 83_71_22.75_00, 14_79_67.51_56, 16_57_32.34_38] )
self.assertTrue(torch.allclose(encoding["labels"][0]["area"] , lowerCAmelCase__ ) )
# verify boxes
a__ : Any =torch.Size([6, 4] )
self.assertEqual(encoding["labels"][0]["boxes"].shape , lowerCAmelCase__ )
a__ : Optional[Any] =torch.tensor([0.55_03, 0.27_65, 0.06_04, 0.22_15] )
self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0] , lowerCAmelCase__ , atol=1E-3 ) )
# verify image_id
a__ : int =torch.tensor([3_9_7_6_9] )
self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"] , lowerCAmelCase__ ) )
# verify is_crowd
a__ : str =torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"] , lowerCAmelCase__ ) )
# verify class_labels
a__ : List[str] =torch.tensor([7_5, 7_5, 6_3, 6_5, 1_7, 1_7] )
self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"] , lowerCAmelCase__ ) )
# verify orig_size
a__ : Optional[int] =torch.tensor([4_8_0, 6_4_0] )
self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"] , lowerCAmelCase__ ) )
# verify size
a__ : int =torch.tensor([8_0_0, 1_0_6_6] )
self.assertTrue(torch.allclose(encoding["labels"][0]["size"] , lowerCAmelCase__ ) )
@slow
def _lowercase ( self ) -> List[str]:
'''simple docstring'''
a__ : List[str] =Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt" , "r" ) as f:
a__ : int =json.loads(f.read() )
a__ : List[str] ={"file_name": "000000039769.png", "image_id": 3_9_7_6_9, "segments_info": target}
a__ : List[Any] =pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic" )
# encode them
a__ : Any =DeformableDetrImageProcessor(format="coco_panoptic" )
a__ : Optional[Any] =image_processing(images=lowerCAmelCase__ , annotations=lowerCAmelCase__ , masks_path=lowerCAmelCase__ , return_tensors="pt" )
# verify pixel values
a__ : Union[str, Any] =torch.Size([1, 3, 8_0_0, 1_0_6_6] )
self.assertEqual(encoding["pixel_values"].shape , lowerCAmelCase__ )
a__ : Union[str, Any] =torch.tensor([0.27_96, 0.31_38, 0.34_81] )
self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3] , lowerCAmelCase__ , atol=1E-4 ) )
# verify area
a__ : Dict =torch.tensor([14_79_79.68_75, 16_55_27.04_69, 48_46_38.59_38, 1_12_92.93_75, 58_79.65_62, 76_34.11_47] )
self.assertTrue(torch.allclose(encoding["labels"][0]["area"] , lowerCAmelCase__ ) )
# verify boxes
a__ : Optional[int] =torch.Size([6, 4] )
self.assertEqual(encoding["labels"][0]["boxes"].shape , lowerCAmelCase__ )
a__ : Any =torch.tensor([0.26_25, 0.54_37, 0.46_88, 0.86_25] )
self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0] , lowerCAmelCase__ , atol=1E-3 ) )
# verify image_id
a__ : int =torch.tensor([3_9_7_6_9] )
self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"] , lowerCAmelCase__ ) )
# verify is_crowd
a__ : List[str] =torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"] , lowerCAmelCase__ ) )
# verify class_labels
a__ : Optional[Any] =torch.tensor([1_7, 1_7, 6_3, 7_5, 7_5, 9_3] )
self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"] , lowerCAmelCase__ ) )
# verify masks
a__ : int =8_2_2_8_7_3
self.assertEqual(encoding["labels"][0]["masks"].sum().item() , lowerCAmelCase__ )
# verify orig_size
a__ : List[str] =torch.tensor([4_8_0, 6_4_0] )
self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"] , lowerCAmelCase__ ) )
# verify size
a__ : int =torch.tensor([8_0_0, 1_0_6_6] )
self.assertTrue(torch.allclose(encoding["labels"][0]["size"] , lowerCAmelCase__ ) )
| 95 |
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionPipeline
from diffusers.utils.testing_utils import load_image, nightly, require_torch_gpu, torch_device
UpperCAmelCase : int = False
class __lowerCAmelCase ( unittest.TestCase):
pass
@nightly
@require_torch_gpu
class __lowerCAmelCase ( unittest.TestCase):
def _lowercase ( self ) -> Tuple:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _lowercase ( self ) -> Any:
'''simple docstring'''
a__ : str =VersatileDiffusionPipeline.from_pretrained("shi-labs/versatile-diffusion" , torch_dtype=torch.floataa )
pipe.to(lowerCAmelCase__ )
pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
a__ : int =load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg" )
a__ : Optional[Any] =torch.manual_seed(0 )
a__ : Optional[Any] =pipe.dual_guided(
prompt="first prompt" , image=lowerCAmelCase__ , text_to_image_strength=0.75 , generator=lowerCAmelCase__ , guidance_scale=7.5 , num_inference_steps=2 , output_type="numpy" , ).images
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(lowerCAmelCase__ )
a__ : str =VersatileDiffusionPipeline.from_pretrained(lowerCAmelCase__ , torch_dtype=torch.floataa )
pipe.to(lowerCAmelCase__ )
pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
a__ : Optional[Any] =generator.manual_seed(0 )
a__ : Tuple =pipe.dual_guided(
prompt="first prompt" , image=lowerCAmelCase__ , text_to_image_strength=0.75 , generator=lowerCAmelCase__ , guidance_scale=7.5 , num_inference_steps=2 , output_type="numpy" , ).images
assert np.abs(image - new_image ).sum() < 1E-5, "Models don't have the same forward pass"
def _lowercase ( self ) -> Any:
'''simple docstring'''
a__ : str =VersatileDiffusionPipeline.from_pretrained("shi-labs/versatile-diffusion" , torch_dtype=torch.floataa )
pipe.to(lowerCAmelCase__ )
pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
a__ : Optional[Any] ="cyberpunk 2077"
a__ : int =load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg" )
a__ : Union[str, Any] =torch.manual_seed(0 )
a__ : Tuple =pipe.dual_guided(
prompt=lowerCAmelCase__ , image=lowerCAmelCase__ , text_to_image_strength=0.75 , generator=lowerCAmelCase__ , guidance_scale=7.5 , num_inference_steps=5_0 , output_type="numpy" , ).images
a__ : int =image[0, 2_5_3:2_5_6, 2_5_3:2_5_6, -1]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
a__ : Any =np.array([0.14_48, 0.16_19, 0.17_41, 0.10_86, 0.11_47, 0.11_28, 0.11_99, 0.11_65, 0.10_01] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
a__ : str ="A painting of a squirrel eating a burger "
a__ : Optional[int] =torch.manual_seed(0 )
a__ : str =pipe.text_to_image(
prompt=lowerCAmelCase__ , generator=lowerCAmelCase__ , guidance_scale=7.5 , num_inference_steps=5_0 , output_type="numpy" ).images
a__ : Any =image[0, 2_5_3:2_5_6, 2_5_3:2_5_6, -1]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
a__ : Optional[int] =np.array([0.33_67, 0.31_69, 0.26_56, 0.38_70, 0.47_90, 0.37_96, 0.40_09, 0.48_78, 0.47_78] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
a__ : Optional[Any] =pipe.image_variation(lowerCAmelCase__ , generator=lowerCAmelCase__ , output_type="numpy" ).images
a__ : Union[str, Any] =image[0, 2_5_3:2_5_6, 2_5_3:2_5_6, -1]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
a__ : Any =np.array([0.30_76, 0.31_23, 0.32_84, 0.37_82, 0.37_70, 0.38_94, 0.42_97, 0.43_31, 0.44_56] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
| 95 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_A = {
'''configuration_clipseg''': [
'''CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''CLIPSegConfig''',
'''CLIPSegTextConfig''',
'''CLIPSegVisionConfig''',
],
'''processing_clipseg''': ['''CLIPSegProcessor'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_A = [
'''CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''CLIPSegModel''',
'''CLIPSegPreTrainedModel''',
'''CLIPSegTextModel''',
'''CLIPSegVisionModel''',
'''CLIPSegForImageSegmentation''',
]
if TYPE_CHECKING:
from .configuration_clipseg import (
CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP,
CLIPSegConfig,
CLIPSegTextConfig,
CLIPSegVisionConfig,
)
from .processing_clipseg import CLIPSegProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_clipseg import (
CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST,
CLIPSegForImageSegmentation,
CLIPSegModel,
CLIPSegPreTrainedModel,
CLIPSegTextModel,
CLIPSegVisionModel,
)
else:
import sys
_A = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 364 |
import copy
import os
from typing import TYPE_CHECKING, List, Union
if TYPE_CHECKING:
pass
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_A = logging.get_logger(__name__)
_A = {
    "kakaobrain/align-base": "https://huggingface.co/kakaobrain/align-base/resolve/main/config.json",
}


class AlignTextConfig(PretrainedConfig):
    model_type = "align_text_model"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        use_cache=True,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.pad_token_id = pad_token_id

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the text config dict if we are loading from AlignConfig
        if config_dict.get("model_type") == "align":
            config_dict = config_dict["text_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)


class AlignVisionConfig(PretrainedConfig):
    model_type = "align_vision_model"

    def __init__(
        self,
        num_channels=3,
        image_size=600,
        width_coefficient=2.0,
        depth_coefficient=3.1,
        depth_divisor=8,
        kernel_sizes=[3, 3, 5, 3, 5, 5, 3],
        in_channels=[32, 16, 24, 40, 80, 112, 192],
        out_channels=[16, 24, 40, 80, 112, 192, 320],
        depthwise_padding=[],
        strides=[1, 2, 2, 2, 1, 2, 1],
        num_block_repeats=[1, 2, 2, 3, 3, 4, 1],
        expand_ratios=[1, 6, 6, 6, 6, 6, 6],
        squeeze_expansion_ratio=0.25,
        hidden_act="swish",
        hidden_dim=2560,
        pooling_type="mean",
        initializer_range=0.02,
        batch_norm_eps=0.001,
        batch_norm_momentum=0.99,
        drop_connect_rate=0.2,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.num_channels = num_channels
        self.image_size = image_size
        self.width_coefficient = width_coefficient
        self.depth_coefficient = depth_coefficient
        self.depth_divisor = depth_divisor
        self.kernel_sizes = kernel_sizes
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.depthwise_padding = depthwise_padding
        self.strides = strides
        self.num_block_repeats = num_block_repeats
        self.expand_ratios = expand_ratios
        self.squeeze_expansion_ratio = squeeze_expansion_ratio
        self.hidden_act = hidden_act
        self.hidden_dim = hidden_dim
        self.pooling_type = pooling_type
        self.initializer_range = initializer_range
        self.batch_norm_eps = batch_norm_eps
        self.batch_norm_momentum = batch_norm_momentum
        self.drop_connect_rate = drop_connect_rate
        self.num_hidden_layers = sum(num_block_repeats) * 4

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the vision config dict if we are loading from AlignConfig
        if config_dict.get("model_type") == "align":
            config_dict = config_dict["vision_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)


class AlignConfig(PretrainedConfig):
    model_type = "align"
    is_composition = True

    def __init__(
        self,
        text_config=None,
        vision_config=None,
        projection_dim=640,
        temperature_init_value=1.0,
        initializer_range=0.02,
        **kwargs,
    ):
        super().__init__(**kwargs)

        if text_config is None:
            text_config = {}
            logger.info("text_config is None. Initializing the AlignTextConfig with default values.")

        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. Initializing the AlignVisionConfig with default values.")

        self.text_config = AlignTextConfig(**text_config)
        self.vision_config = AlignVisionConfig(**vision_config)

        self.projection_dim = projection_dim
        self.temperature_init_value = temperature_init_value
        self.initializer_range = initializer_range

    @classmethod
    def from_text_vision_configs(cls, text_config, vision_config, **kwargs):
        # Instantiate the composite config from the two sub-model configs.
        return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs)

    def to_dict(self):
        # Serialize, expanding the nested sub-configs into plain dicts.
        output = copy.deepcopy(self.__dict__)
        output["text_config"] = self.text_config.to_dict()
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
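
# Illustrative usage sketch (added for clarity, not part of the original module);
# the argument values below are arbitrary examples:
#
#   text_config = AlignTextConfig(vocab_size=30522, hidden_size=768)
#   vision_config = AlignVisionConfig(image_size=600)
#   config = AlignConfig.from_text_vision_configs(text_config, vision_config, projection_dim=640)
#   assert config.to_dict()["model_type"] == "align"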
| 261 | 0 |
"""simple docstring"""
import torch
def _SCREAMING_SNAKE_CASE ( ) -> Optional[int]:
if torch.cuda.is_available():
A__ = torch.cuda.device_count()
else:
A__ = 0
print(f"""Successfully ran on {num_gpus} GPUs""" )
if __name__ == "__main__":
main()
| 247 |
"""simple docstring"""
import os
import numpy
import onnx
def _SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ ) -> Dict:
A__ = a.name
A__ = b.name
A__ = ""
A__ = ""
A__ = a == b
A__ = name_a
A__ = name_b
return res
def _SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ , lowercase_ ) -> Dict:
for i, input_name in enumerate(node_proto.input ):
if input_name == name:
node_proto.input.insert(lowercase_ , lowercase_ )
node_proto.input.pop(i + 1 )
if node_proto.op_type == "If":
_graph_replace_input_with(node_proto.attribute[0].g , lowercase_ , lowercase_ )
_graph_replace_input_with(node_proto.attribute[1].g , lowercase_ , lowercase_ )
if node_proto.op_type == "Loop":
_graph_replace_input_with(node_proto.attribute[0].g , lowercase_ , lowercase_ )
def _SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ , lowercase_ ) -> List[Any]:
for n in graph_proto.node:
_node_replace_input_with(lowercase_ , lowercase_ , lowercase_ )
def _SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ , lowercase_ ) -> int:
A__ = list(model.graph.initializer )
A__ = list(model_without_ext.graph.initializer )
for i, ref_i in ind_to_replace:
assert inits_with_data[i].name == inits[i].name
assert inits_with_data[ref_i].name == inits[ref_i].name
assert i > ref_i
A__ = inits[i].name
A__ = inits[ref_i].name
model_without_ext.graph.initializer.remove(inits[i] )
# for n in model.graph.node:
_graph_replace_input_with(model_without_ext.graph , lowercase_ , lowercase_ )
def _SCREAMING_SNAKE_CASE ( lowercase_ ) -> str:
A__ = os.path.dirname(lowercase_ )
A__ = os.path.basename(lowercase_ )
A__ = onnx.load(os.path.join(lowercase_ , lowercase_ ) )
A__ = list(model.graph.initializer )
A__ = set()
A__ = {}
A__ = []
A__ = 0
for i in range(len(lowercase_ ) ):
if i in dup_set:
continue
for j in range(i + 1 , len(lowercase_ ) ):
if j in dup_set:
continue
if _is_equal_tensor_proto(inits[i] , inits[j] ):
dup_set.add(lowercase_ )
dup_set.add(lowercase_ )
A__ = inits[j].data_type
A__ = numpy.prod(inits[j].dims )
if dtype == 1:
mem_size *= 4
elif dtype == 6:
mem_size *= 4
elif dtype == 7 or dtype == 11:
mem_size *= 8
else:
print("unexpected data type: " , lowercase_ )
total_reduced_size += mem_size
A__ = inits[i].name
A__ = inits[j].name
if name_i in dup_map:
dup_map[name_i].append(lowercase_ )
else:
A__ = [name_j]
ind_to_replace.append((j, i) )
print("total reduced size: " , total_reduced_size / 10_24 / 10_24 / 10_24 , "GB" )
A__ = sorted(lowercase_ )
_remove_dup_initializers_from_model(lowercase_ , lowercase_ , lowercase_ )
A__ = "optimized_" + model_file_name
A__ = os.path.join(lowercase_ , lowercase_ )
onnx.save(lowercase_ , lowercase_ )
return new_model
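
# Illustrative usage sketch (the path below is hypothetical):
#
#   optimized_path = remove_dup_initializers("exported/model.onnx")
#   print("saved deduplicated model to", optimized_path)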
| 247 | 1 |
import gc
import inspect
import unittest

import torch
from parameterized import parameterized

from diffusers import PriorTransformer
from diffusers.utils import floats_tensor, slow, torch_all_close, torch_device
from diffusers.utils.testing_utils import enable_full_determinism

from .test_modeling_common import ModelTesterMixin


enable_full_determinism()


class PriorTransformerTests(ModelTesterMixin, unittest.TestCase):
    model_class = PriorTransformer
    main_input_name = "hidden_states"

    @property
    def dummy_input(self):
        batch_size = 4
        embedding_dim = 8
        num_embeddings = 7

        hidden_states = floats_tensor((batch_size, embedding_dim)).to(torch_device)
        proj_embedding = floats_tensor((batch_size, embedding_dim)).to(torch_device)
        encoder_hidden_states = floats_tensor((batch_size, num_embeddings, embedding_dim)).to(torch_device)

        return {
            "hidden_states": hidden_states,
            "timestep": 2,
            "proj_embedding": proj_embedding,
            "encoder_hidden_states": encoder_hidden_states,
        }

    def get_dummy_seed_input(self, seed=0):
        torch.manual_seed(seed)
        batch_size = 4
        embedding_dim = 8
        num_embeddings = 7

        hidden_states = torch.randn((batch_size, embedding_dim)).to(torch_device)
        proj_embedding = torch.randn((batch_size, embedding_dim)).to(torch_device)
        encoder_hidden_states = torch.randn((batch_size, num_embeddings, embedding_dim)).to(torch_device)

        return {
            "hidden_states": hidden_states,
            "timestep": 2,
            "proj_embedding": proj_embedding,
            "encoder_hidden_states": encoder_hidden_states,
        }

    @property
    def input_shape(self):
        return (4, 8)

    @property
    def output_shape(self):
        return (4, 8)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "num_attention_heads": 2,
            "attention_head_dim": 4,
            "num_layers": 2,
            "embedding_dim": 8,
            "num_embeddings": 7,
            "additional_embeddings": 4,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_from_pretrained_hub(self):
        model, loading_info = PriorTransformer.from_pretrained(
            "hf-internal-testing/prior-dummy", output_loading_info=True
        )
        self.assertIsNotNone(model)
        self.assertEqual(len(loading_info["missing_keys"]), 0)

        model.to(torch_device)
        hidden_states = model(**self.dummy_input)[0]

        assert hidden_states is not None, "Make sure output is not None"

    def test_forward_signature(self):
        init_dict, _ = self.prepare_init_args_and_inputs_for_common()

        model = self.model_class(**init_dict)
        signature = inspect.signature(model.forward)
        # signature.parameters is an OrderedDict => so arg_names order is deterministic
        arg_names = [*signature.parameters.keys()]

        expected_arg_names = ["hidden_states", "timestep"]
        self.assertListEqual(arg_names[:2], expected_arg_names)

    def test_output_pretrained(self):
        model = PriorTransformer.from_pretrained("hf-internal-testing/prior-dummy")
        model = model.to(torch_device)

        if hasattr(model, "set_default_attn_processor"):
            model.set_default_attn_processor()

        input = self.get_dummy_seed_input()

        with torch.no_grad():
            output = model(**input)[0]

        output_slice = output[0, :5].flatten().cpu()
        print(output_slice)

        # Since the VAE Gaussian prior's generator is seeded on the appropriate device,
        # the expected output slices are not the same for CPU and GPU.
        expected_output_slice = torch.tensor([-1.3436, -0.2870, 0.7538, 0.4368, -0.0239])
        self.assertTrue(torch_all_close(output_slice, expected_output_slice, rtol=1e-2))


@slow
class PriorTransformerIntegrationTests(unittest.TestCase):
    def get_dummy_seed_input(self, batch_size=1, embedding_dim=768, num_embeddings=77, seed=0):
        torch.manual_seed(seed)

        hidden_states = torch.randn((batch_size, embedding_dim)).to(torch_device)
        proj_embedding = torch.randn((batch_size, embedding_dim)).to(torch_device)
        encoder_hidden_states = torch.randn((batch_size, num_embeddings, embedding_dim)).to(torch_device)

        return {
            "hidden_states": hidden_states,
            "timestep": 2,
            "proj_embedding": proj_embedding,
            "encoder_hidden_states": encoder_hidden_states,
        }

    def tearDown(self):
        # Clean up GPU memory between tests.
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    @parameterized.expand(
        [
            # fmt: off
            [13, [-0.5861, 0.1283, -0.0931, 0.0882, 0.4476, 0.1329, -0.0498, 0.0640]],
            [37, [-0.4913, 0.0110, -0.0483, 0.0541, 0.4954, -0.0170, 0.0354, 0.1651]],
            # fmt: on
        ]
    )
    def test_kandinsky_prior(self, seed, expected_slice):
        model = PriorTransformer.from_pretrained("kandinsky-community/kandinsky-2-1-prior", subfolder="prior")
        model.to(torch_device)
        input = self.get_dummy_seed_input(seed=seed)

        with torch.no_grad():
            sample = model(**input)[0]

        assert list(sample.shape) == [1, 768]

        output_slice = sample[0, :8].flatten().cpu()
        print(output_slice)
        expected_output_slice = torch.tensor(expected_slice)

        assert torch_all_close(output_slice, expected_output_slice, atol=1e-3)
| 366 |
from collections import OrderedDict
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig


ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "albert-base-v1": "https://huggingface.co/albert-base-v1/resolve/main/config.json",
    "albert-large-v1": "https://huggingface.co/albert-large-v1/resolve/main/config.json",
    "albert-xlarge-v1": "https://huggingface.co/albert-xlarge-v1/resolve/main/config.json",
    "albert-xxlarge-v1": "https://huggingface.co/albert-xxlarge-v1/resolve/main/config.json",
    "albert-base-v2": "https://huggingface.co/albert-base-v2/resolve/main/config.json",
    "albert-large-v2": "https://huggingface.co/albert-large-v2/resolve/main/config.json",
    "albert-xlarge-v2": "https://huggingface.co/albert-xlarge-v2/resolve/main/config.json",
    "albert-xxlarge-v2": "https://huggingface.co/albert-xxlarge-v2/resolve/main/config.json",
}


class AlbertConfig(PretrainedConfig):
    model_type = "albert"

    def __init__(
        self,
        vocab_size=30000,
        embedding_size=128,
        hidden_size=4096,
        num_hidden_layers=12,
        num_hidden_groups=1,
        num_attention_heads=64,
        intermediate_size=16384,
        inner_group_num=1,
        hidden_act="gelu_new",
        hidden_dropout_prob=0,
        attention_probs_dropout_prob=0,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        classifier_dropout_prob=0.1,
        position_embedding_type="absolute",
        pad_token_id=0,
        bos_token_id=2,
        eos_token_id=3,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.embedding_size = embedding_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_hidden_groups = num_hidden_groups
        self.num_attention_heads = num_attention_heads
        self.inner_group_num = inner_group_num
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.classifier_dropout_prob = classifier_dropout_prob
        self.position_embedding_type = position_embedding_type


class AlbertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("token_type_ids", dynamic_axis),
            ]
        )
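
# Illustrative sketch (not part of the original module); values are arbitrary
# and the OnnxConfig constructor signature is assumed to be the standard one:
#
#   config = AlbertConfig(hidden_size=768, num_attention_heads=12, intermediate_size=3072)
#   onnx_config = AlbertOnnxConfig(config, task="default")
#   print(onnx_config.inputs)  # OrderedDict mapping each input tensor to its dynamic axes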
| 274 | 0 |
import warnings

from ...utils import logging
from .image_processing_videomae import VideoMAEImageProcessor


logger = logging.get_logger(__name__)


class VideoMAEFeatureExtractor(VideoMAEImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class VideoMAEFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use VideoMAEImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
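
# Illustrative note (not part of the original module): instantiating the
# deprecated class still works, but emits a FutureWarning, e.g.
#
#   extractor = VideoMAEFeatureExtractor()  # warns, then behaves like VideoMAEImageProcessor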
| 52 |
from __future__ import annotations

import unittest

from transformers import DistilBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow

from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin


if is_tf_available():
    import tensorflow as tf

    from transformers.models.distilbert.modeling_tf_distilbert import (
        TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
        TFDistilBertForMaskedLM,
        TFDistilBertForMultipleChoice,
        TFDistilBertForQuestionAnswering,
        TFDistilBertForSequenceClassification,
        TFDistilBertForTokenClassification,
        TFDistilBertModel,
    )


class TFDistilBertModelTester:
    def __init__(self, parent):
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_mask = True
        self.use_token_type_ids = False
        self.use_labels = True
        self.vocab_size = 99
        self.hidden_size = 32
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.intermediate_size = 37
        self.hidden_act = "gelu"
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.scope = None

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = DistilBertConfig(
            vocab_size=self.vocab_size,
            dim=self.hidden_size,
            n_layers=self.num_hidden_layers,
            n_heads=self.num_attention_heads,
            hidden_dim=self.intermediate_size,
            hidden_act=self.hidden_act,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
        )

        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels

    def create_and_check_distilbert_model(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFDistilBertModel(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask}
        result = model(inputs)

        inputs = [input_ids, input_mask]
        result = model(inputs)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_distilbert_for_masked_lm(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFDistilBertForMaskedLM(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_distilbert_for_question_answering(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFDistilBertForQuestionAnswering(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
        }
        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_distilbert_for_sequence_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFDistilBertForSequenceClassification(config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_distilbert_for_multiple_choice(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = TFDistilBertForMultipleChoice(config)
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
        inputs = {
            "input_ids": multiple_choice_inputs_ids,
            "attention_mask": multiple_choice_input_mask,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def create_and_check_distilbert_for_token_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFDistilBertForTokenClassification(config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, input_mask, sequence_labels, token_labels, choice_labels) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict


@require_tf
class TFDistilBertModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFDistilBertModel,
            TFDistilBertForMaskedLM,
            TFDistilBertForQuestionAnswering,
            TFDistilBertForSequenceClassification,
            TFDistilBertForTokenClassification,
            TFDistilBertForMultipleChoice,
        )
        if is_tf_available()
        else None
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFDistilBertModel,
            "fill-mask": TFDistilBertForMaskedLM,
            "question-answering": TFDistilBertForQuestionAnswering,
            "text-classification": TFDistilBertForSequenceClassification,
            "token-classification": TFDistilBertForTokenClassification,
            "zero-shot": TFDistilBertForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFDistilBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DistilBertConfig, dim=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_distilbert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_masked_lm(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_sequence_classification(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_multiple_choice(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in list(TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]):
            model = TFDistilBertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


@require_tf
class TFDistilBertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = TFDistilBertModel.from_pretrained("distilbert-base-uncased")
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]

        expected_shape = [1, 6, 768]
        self.assertEqual(output.shape, expected_shape)

        expected_slice = tf.constant(
            [
                [
                    [0.19261885, -0.13732955, 0.4119799],
                    [0.22150156, -0.07422661, 0.39037204],
                    [0.22756018, -0.0896414, 0.3701467],
                ]
            ]
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-4)
| 143 | 0 |
import torch


def main() -> None:
    # Report how many CUDA devices are visible to this process.
    if torch.cuda.is_available():
        num_gpus = torch.cuda.device_count()
    else:
        num_gpus = 0
    print(f"Successfully ran on {num_gpus} GPUs")


if __name__ == "__main__":
    main()
| 351 |
import tempfile
import unittest

import numpy as np

from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError

from transformers import BertConfig, is_flax_available
from transformers.testing_utils import TOKEN, USER, is_staging_test, require_flax


if is_flax_available():
    import os

    from flax.core.frozen_dict import unfreeze
    from flax.traverse_util import flatten_dict

    from transformers import FlaxBertModel

    os.environ["XLA_PYTHON_CLIENT_MEM_FRACTION"] = "0.12"  # assumed parallelism: 8


@require_flax
@is_staging_test
class FlaxModelPushToHubTester(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)

    @classmethod
    def tearDownClass(cls):
        try:
            delete_repo(token=cls._token, repo_id="test-model-flax")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="valid_org/test-model-flax-org")
        except HTTPError:
            pass

    def test_push_to_hub(self):
        config = BertConfig(
            vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37
        )
        model = FlaxBertModel(config)
        model.push_to_hub("test-model-flax", use_auth_token=self._token)

        new_model = FlaxBertModel.from_pretrained(f"{USER}/test-model-flax")

        base_params = flatten_dict(unfreeze(model.params))
        new_params = flatten_dict(unfreeze(new_model.params))

        for key in base_params.keys():
            max_diff = (base_params[key] - new_params[key]).sum().item()
            self.assertLessEqual(max_diff, 1e-3, msg=f"{key} not identical")

        # Reset repo
        delete_repo(token=self._token, repo_id="test-model-flax")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(tmp_dir, repo_id="test-model-flax", push_to_hub=True, use_auth_token=self._token)

        new_model = FlaxBertModel.from_pretrained(f"{USER}/test-model-flax")

        base_params = flatten_dict(unfreeze(model.params))
        new_params = flatten_dict(unfreeze(new_model.params))

        for key in base_params.keys():
            max_diff = (base_params[key] - new_params[key]).sum().item()
            self.assertLessEqual(max_diff, 1e-3, msg=f"{key} not identical")

    def test_push_to_hub_in_organization(self):
        config = BertConfig(
            vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37
        )
        model = FlaxBertModel(config)
        model.push_to_hub("valid_org/test-model-flax-org", use_auth_token=self._token)

        new_model = FlaxBertModel.from_pretrained("valid_org/test-model-flax-org")

        base_params = flatten_dict(unfreeze(model.params))
        new_params = flatten_dict(unfreeze(new_model.params))

        for key in base_params.keys():
            max_diff = (base_params[key] - new_params[key]).sum().item()
            self.assertLessEqual(max_diff, 1e-3, msg=f"{key} not identical")

        # Reset repo
        delete_repo(token=self._token, repo_id="valid_org/test-model-flax-org")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(
                tmp_dir, repo_id="valid_org/test-model-flax-org", push_to_hub=True, use_auth_token=self._token
            )

        new_model = FlaxBertModel.from_pretrained("valid_org/test-model-flax-org")

        base_params = flatten_dict(unfreeze(model.params))
        new_params = flatten_dict(unfreeze(new_model.params))

        for key in base_params.keys():
            max_diff = (base_params[key] - new_params[key]).sum().item()
            self.assertLessEqual(max_diff, 1e-3, msg=f"{key} not identical")


def check_models_equal(model_1, model_2):
    models_are_equal = True
    flat_params_1 = flatten_dict(model_1.params)
    flat_params_2 = flatten_dict(model_2.params)
    for key in flat_params_1.keys():
        if np.sum(np.abs(flat_params_1[key] - flat_params_2[key])) > 1e-4:
            models_are_equal = False
    return models_are_equal


@require_flax
class FlaxModelUtilsTest(unittest.TestCase):
    def test_model_from_pretrained_subfolder(self):
        config = BertConfig.from_pretrained("hf-internal-testing/tiny-bert-flax-only")
        model = FlaxBertModel(config)

        subfolder = "bert"
        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(os.path.join(tmp_dir, subfolder))

            with self.assertRaises(OSError):
                _ = FlaxBertModel.from_pretrained(tmp_dir)

            model_loaded = FlaxBertModel.from_pretrained(tmp_dir, subfolder=subfolder)

        self.assertTrue(check_models_equal(model, model_loaded))

    def test_model_from_pretrained_subfolder_sharded(self):
        config = BertConfig.from_pretrained("hf-internal-testing/tiny-bert-flax-only")
        model = FlaxBertModel(config)

        subfolder = "bert"
        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(os.path.join(tmp_dir, subfolder), max_shard_size="10KB")

            with self.assertRaises(OSError):
                _ = FlaxBertModel.from_pretrained(tmp_dir)

            model_loaded = FlaxBertModel.from_pretrained(tmp_dir, subfolder=subfolder)

        self.assertTrue(check_models_equal(model, model_loaded))

    def test_model_from_pretrained_hub_subfolder(self):
        subfolder = "bert"
        model_id = "hf-internal-testing/tiny-random-bert-subfolder"

        with self.assertRaises(OSError):
            _ = FlaxBertModel.from_pretrained(model_id)

        model = FlaxBertModel.from_pretrained(model_id, subfolder=subfolder)

        self.assertIsNotNone(model)

    def test_model_from_pretrained_hub_subfolder_sharded(self):
        subfolder = "bert"
        model_id = "hf-internal-testing/tiny-random-bert-sharded-subfolder"
        with self.assertRaises(OSError):
            _ = FlaxBertModel.from_pretrained(model_id)

        model = FlaxBertModel.from_pretrained(model_id, subfolder=subfolder)

        self.assertIsNotNone(model)
| 18 | 0 |
"""simple docstring"""
from __future__ import annotations
__UpperCamelCase : Dict = 1.6021e-19 # units = C
def __SCREAMING_SNAKE_CASE ( A_ , A_ , A_ , ):
if (conductivity, electron_conc, mobility).count(0 ) != 1:
raise ValueError('''You cannot supply more or less than 2 values''' )
elif conductivity < 0:
raise ValueError('''Conductivity cannot be negative''' )
elif electron_conc < 0:
raise ValueError('''Electron concentration cannot be negative''' )
elif mobility < 0:
raise ValueError('''mobility cannot be negative''' )
elif conductivity == 0:
return (
"conductivity",
mobility * electron_conc * ELECTRON_CHARGE,
)
elif electron_conc == 0:
return (
"electron_conc",
conductivity / (mobility * ELECTRON_CHARGE),
)
else:
return (
"mobility",
conductivity / (electron_conc * ELECTRON_CHARGE),
)
if __name__ == "__main__":
import doctest
doctest.testmod()
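
# Illustrative usage (hypothetical values, not from the original file): pass 0
# for the unknown quantity and the function solves sigma = n * e * mu for it.
#
#   >>> carrier_concentration(conductivity=25, electron_conc=100, mobility=0)
#   ('mobility', ...)  # approximately 1.56e+18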
| 106 |
from math import ceil, sqrt


def solution(limit: int = 1000000) -> int:
    """
    Count square laminae that use no more than `limit` tiles: for each outer
    side length, count the hole sizes of matching parity that fit the budget.
    """
    answer = 0

    for outer_width in range(3, (limit // 4) + 2):
        if outer_width**2 > limit:
            hole_width_lower_bound = max(ceil(sqrt(outer_width**2 - limit)), 1)
        else:
            hole_width_lower_bound = 1

        # The hole must have the same parity as the outer square.
        if (outer_width - hole_width_lower_bound) % 2:
            hole_width_lower_bound += 1

        answer += (outer_width - hole_width_lower_bound - 2) // 2 + 1

    return answer


if __name__ == "__main__":
    print(f"{solution() = }")
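
# Hedged cross-check sketch (not from the original solution): brute-force the
# same count for a small limit to sanity-check the closed-form loop above.
def _brute_force_laminae(limit: int) -> int:
    count = 0
    for outer in range(3, limit):
        # Holes of the same parity, largest first, so tile usage grows until it
        # exceeds the budget and we can stop.
        for hole in range(outer - 2, 0, -2):
            if outer**2 - hole**2 > limit:
                break
            count += 1
    return count


if __name__ == "__main__":
    assert solution(1000) == _brute_force_laminae(1000)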
| 10 | 0 |
from __future__ import annotations

import collections
import tempfile
import unittest

import numpy as np

from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import is_tf_available, is_vision_available

from ...test_modeling_tf_common import floats_tensor, ids_tensor, random_attention_mask
from ..bert.test_modeling_tf_bert import TFBertModelTester
from ..clip.test_modeling_tf_clip import TFCLIPVisionModelTester
from ..deit.test_modeling_tf_deit import TFDeiTModelTester
from ..roberta.test_modeling_tf_roberta import TFRobertaModelTester
from ..vit.test_modeling_tf_vit import TFViTModelTester


if is_tf_available():
    from transformers import (
        TFBertModel,
        TFCLIPVisionModel,
        TFDeiTModel,
        TFRobertaModel,
        TFVisionTextDualEncoderModel,
        TFViTModel,
        VisionTextDualEncoderConfig,
    )

if is_vision_available():
    from PIL import Image

    from transformers import VisionTextDualEncoderProcessor


def to_2tuple(x):
    if isinstance(x, collections.abc.Iterable):
        return x
    return (x, x)


@require_tf
class TFVisionTextDualEncoderMixin:
    def get_vision_text_model(self, config, text_config):
        pass

    def prepare_config_and_inputs(self):
        pass

    def get_pretrained_model_and_inputs(self):
        pass

    def check_model_from_pretrained_configs(
        self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs
    ):
        config = VisionTextDualEncoderConfig.from_vision_text_configs(vision_config, text_config)

        model = TFVisionTextDualEncoderModel(config)

        output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)

        self.assertEqual(output["text_embeds"].shape, (input_ids.shape[0], config.projection_dim))
        self.assertEqual(output["image_embeds"].shape, (pixel_values.shape[0], config.projection_dim))

    def check_vision_text_dual_encoder_model(
        self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs
    ):
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        model = TFVisionTextDualEncoderModel(vision_model=vision_model, text_model=text_model)

        output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)

        self.assertEqual(output["text_embeds"].shape, (input_ids.shape[0], model.config.projection_dim))
        self.assertEqual(output["image_embeds"].shape, (pixel_values.shape[0], model.config.projection_dim))

    def check_vision_text_dual_encoder_from_pretrained(
        self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs
    ):
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        kwargs = {"vision_model": vision_model, "text_model": text_model}
        model = TFVisionTextDualEncoderModel.from_vision_text_pretrained(**kwargs)

        output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)

        self.assertEqual(output["text_embeds"].shape, (input_ids.shape[0], model.config.projection_dim))
        self.assertEqual(output["image_embeds"].shape, (pixel_values.shape[0], model.config.projection_dim))

    def check_save_load(self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs):
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        model = TFVisionTextDualEncoderModel(vision_model=vision_model, text_model=text_model)

        output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)
        out_1 = output[0].numpy()

        with tempfile.TemporaryDirectory() as tmpdirname:
            model.save_pretrained(tmpdirname)
            model = TFVisionTextDualEncoderModel.from_pretrained(tmpdirname)

            after_output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)
            out_2 = after_output[0].numpy()
            max_diff = np.amax(np.abs(out_2 - out_1))
            self.assertLessEqual(max_diff, 1e-5)

    def check_vision_text_output_attention(
        self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs
    ):
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        model = TFVisionTextDualEncoderModel(vision_model=vision_model, text_model=text_model)

        output = model(
            input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask, output_attentions=True
        )

        vision_attentions = output.vision_model_output.attentions
        self.assertEqual(len(vision_attentions), vision_config.num_hidden_layers)

        # in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token)
        image_size = to_2tuple(vision_model.config.image_size)
        patch_size = to_2tuple(vision_model.config.patch_size)
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        seq_len = num_patches + 1
        self.assertEqual(vision_attentions[0].shape[-3:], (vision_config.num_attention_heads, seq_len, seq_len))

        text_attentions = output.text_model_output.attentions
        self.assertEqual(len(text_attentions), text_config.num_hidden_layers)

        self.assertEqual(
            text_attentions[0].shape[-3:],
            (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]),
        )

    def assert_almost_equals(self, a: np.ndarray, b: np.ndarray, tol: float):
        diff = np.abs(a - b).max()
        self.assertLessEqual(diff, tol, f"Difference between torch and flax is {diff} (>= {tol}).")

    def test_vision_text_dual_encoder_model(self):
        inputs_dict = self.prepare_config_and_inputs()
        self.check_vision_text_dual_encoder_model(**inputs_dict)

    def test_model_from_pretrained_configs(self):
        inputs_dict = self.prepare_config_and_inputs()
        self.check_model_from_pretrained_configs(**inputs_dict)

    def test_vision_text_dual_encoder_from_pretrained(self):
        inputs_dict = self.prepare_config_and_inputs()
        self.check_vision_text_dual_encoder_from_pretrained(**inputs_dict)

    def test_save_load(self):
        inputs_dict = self.prepare_config_and_inputs()
        self.check_save_load(**inputs_dict)

    def test_vision_text_output_attention(self):
        inputs_dict = self.prepare_config_and_inputs()
        self.check_vision_text_output_attention(**inputs_dict)

    @slow
    def test_real_model_save_load_from_pretrained(self):
        model_2, inputs = self.get_pretrained_model_and_inputs()

        outputs = model_2(**inputs)
        out_2 = outputs[0].numpy()

        with tempfile.TemporaryDirectory() as tmp_dirname:
            model_2.save_pretrained(tmp_dirname)
            model_1 = TFVisionTextDualEncoderModel.from_pretrained(tmp_dirname)

            after_outputs = model_1(**inputs)
            out_1 = after_outputs[0].numpy()
            max_diff = np.amax(np.abs(out_1 - out_2))
            self.assertLessEqual(max_diff, 1e-5)


@require_tf
class TFViTBertModelTest(TFVisionTextDualEncoderMixin, unittest.TestCase):
    def get_pretrained_model_and_inputs(self):
        model = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
            "hf-internal-testing/tiny-random-vit", "hf-internal-testing/tiny-random-bert"
        )
        batch_size = 13
        pixel_values = floats_tensor(
            [
                batch_size,
                model.vision_model.config.num_channels,
                model.vision_model.config.image_size,
                model.vision_model.config.image_size,
            ]
        )
        input_ids = ids_tensor([batch_size, 4], model.text_model.config.vocab_size)
        attention_mask = random_attention_mask([batch_size, 4])
        inputs = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask}

        return model, inputs

    def get_vision_text_model(self, vision_config, text_config):
        vision_model = TFViTModel(vision_config, name="vision_model")
        text_model = TFBertModel(text_config, name="text_model")
        return vision_model, text_model

    def prepare_config_and_inputs(self):
        vit_model_tester = TFViTModelTester(self)
        bert_model_tester = TFBertModelTester(self)
        vision_config_and_inputs = vit_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()

        vision_config, pixel_values, _ = vision_config_and_inputs

        (
            text_config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = text_config_and_inputs

        return {
            "text_config": text_config,
            "vision_config": vision_config,
            "pixel_values": pixel_values,
            "attention_mask": input_mask,
            "input_ids": input_ids,
            "text_token_type_ids": token_type_ids,
            "text_sequence_labels": sequence_labels,
            "text_token_labels": token_labels,
            "text_choice_labels": choice_labels,
        }


@require_tf
class TFDeiTRobertaModelTest(TFVisionTextDualEncoderMixin, unittest.TestCase):
    def get_pretrained_model_and_inputs(self):
        # DeiT repo doesn't have TF weights, but we don't actually use the weights at all so let's
        # just reinitialize it.
        model = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
            "Rocketknight1/tiny-random-deit-tf", "hf-internal-testing/tiny-random-roberta"
        )
        batch_size = 13
        pixel_values = floats_tensor(
            [
                batch_size,
                model.vision_model.config.num_channels,
                model.vision_model.config.image_size,
                model.vision_model.config.image_size,
            ]
        )
        input_ids = ids_tensor([batch_size, 4], model.text_model.config.vocab_size)
        attention_mask = random_attention_mask([batch_size, 4])
        inputs = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask}

        return model, inputs

    def check_vision_text_output_attention(
        self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs
    ):
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        model = TFVisionTextDualEncoderModel(vision_model=vision_model, text_model=text_model)

        output = model(
            input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask, output_attentions=True
        )

        vision_attentions = output.vision_model_output.attentions
        self.assertEqual(len(vision_attentions), vision_config.num_hidden_layers)

        # in DEiT, the seq_len equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
        image_size = to_2tuple(vision_model.config.image_size)
        patch_size = to_2tuple(vision_model.config.patch_size)
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        seq_len = num_patches + 2
        self.assertEqual(vision_attentions[0].shape[-3:], (vision_config.num_attention_heads, seq_len, seq_len))

        text_attentions = output.text_model_output.attentions
        self.assertEqual(len(text_attentions), text_config.num_hidden_layers)

        self.assertEqual(
            text_attentions[0].shape[-3:],
            (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]),
        )

    def get_vision_text_model(self, vision_config, text_config):
        vision_model = TFDeiTModel(vision_config, name="vision_model")
        text_model = TFRobertaModel(text_config, name="text_model")
        return vision_model, text_model

    def prepare_config_and_inputs(self):
        vit_model_tester = TFDeiTModelTester(self)
        bert_model_tester = TFRobertaModelTester(self)
        vision_config_and_inputs = vit_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()

        vision_config, pixel_values, _ = vision_config_and_inputs

        (
            text_config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = text_config_and_inputs

        return {
            "text_config": text_config,
            "vision_config": vision_config,
            "pixel_values": pixel_values,
            "attention_mask": input_mask,
            "input_ids": input_ids,
            "text_token_type_ids": token_type_ids,
            "text_sequence_labels": sequence_labels,
            "text_token_labels": token_labels,
            "text_choice_labels": choice_labels,
        }


@require_tf
class TFCLIPVisionBertModelTest(TFVisionTextDualEncoderMixin, unittest.TestCase):
    def get_pretrained_model_and_inputs(self):
        model = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
            "Rocketknight1/tiny-random-clip-tf", "hf-internal-testing/tiny-random-bert"
        )
        batch_size = 13
        pixel_values = floats_tensor(
            [
                batch_size,
                model.vision_model.config.num_channels,
                model.vision_model.config.image_size,
                model.vision_model.config.image_size,
            ]
        )
        input_ids = ids_tensor([batch_size, 4], model.text_model.config.vocab_size)
        attention_mask = random_attention_mask([batch_size, 4])
        inputs = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask}

        return model, inputs

    def get_vision_text_model(self, vision_config, text_config):
        vision_model = TFCLIPVisionModel(vision_config, name="vision_model")
        text_model = TFBertModel(text_config, name="text_model")
        return vision_model, text_model

    def prepare_config_and_inputs(self):
        clip_model_tester = TFCLIPVisionModelTester(self)
        bert_model_tester = TFBertModelTester(self)
        vision_config_and_inputs = clip_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()

        vision_config, pixel_values = vision_config_and_inputs

        (
            text_config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = text_config_and_inputs

        return {
            "text_config": text_config,
            "vision_config": vision_config,
            "pixel_values": pixel_values,
            "attention_mask": input_mask,
            "input_ids": input_ids,
            "text_token_type_ids": token_type_ids,
            "text_sequence_labels": sequence_labels,
            "text_token_labels": token_labels,
            "text_choice_labels": choice_labels,
        }


@require_vision
@require_tf
class TFVisionTextDualEncoderIntegrationTest(unittest.TestCase):
    @slow
    def test_inference(self):
        model = TFVisionTextDualEncoderModel.from_pretrained(
            "clip-italian/clip-italian", logit_scale_init_value=1.0, from_pt=True
        )
        processor = VisionTextDualEncoderProcessor.from_pretrained("clip-italian/clip-italian")

        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        inputs = processor(
            text=["una foto di un gatto", "una foto di un cane"], images=image, padding=True, return_tensors="np"
        )

        outputs = model(**inputs)

        # verify the logits
        self.assertEqual(outputs.logits_per_image.shape, (inputs.pixel_values.shape[0], inputs.input_ids.shape[0]))
        self.assertEqual(
            outputs.logits_per_text.shape,
            (inputs.input_ids.shape[0], inputs.pixel_values.shape[0]),
        )

        expected_logits = np.array([[1.2284727, 0.3104122]])

        self.assertTrue(np.allclose(outputs.logits_per_image.numpy(), expected_logits, atol=1e-3))
| 109 |
def get_demo_graph(index):
    """Return one of four hard-coded demo graphs (as adjacency lists), selected by index."""
    return [
        {
            0: [1, 2],
            1: [0, 2],
            2: [0, 1, 3, 5],
            3: [2, 4],
            4: [3],
            5: [2, 6, 8],
            6: [5, 7],
            7: [6, 8],
            8: [5, 7],
        },
        {
            0: [6],
            1: [9],
            2: [4, 5],
            3: [4],
            4: [2, 3],
            5: [2],
            6: [0, 7],
            7: [6],
            8: [],
            9: [1],
        },
        {
            0: [4],
            1: [6],
            2: [],
            3: [5, 6, 7],
            4: [0, 6],
            5: [3, 8, 9],
            6: [1, 3, 4, 7],
            7: [3, 6, 8, 9],
            8: [5, 7],
            9: [5, 7],
        },
        {
            0: [1, 3],
            1: [0, 2, 4],
            2: [1, 3, 4],
            3: [0, 2, 4],
            4: [1, 2, 3],
        },
    ][index]


def compute_bridges(graph: dict[int, list[int]]) -> list[tuple[int, int]]:
    """Find all bridges (cut edges) of an undirected graph via low-link DFS."""
    id_ = 0
    n = len(graph)  # No of vertices in graph
    low = [0] * n
    visited = [False] * n

    def dfs(at, parent, bridges, id_):
        visited[at] = True
        low[at] = id_
        id_ += 1
        for to in graph[at]:
            if to == parent:
                pass
            elif not visited[to]:
                dfs(to, at, bridges, id_)
                low[at] = min(low[at], low[to])
                if id_ <= low[to]:
                    bridges.append((at, to) if at < to else (to, at))
            else:
                # This edge is a back edge and cannot be a bridge
                low[at] = min(low[at], low[to])

    bridges = []
    for i in range(n):
        if not visited[i]:
            dfs(i, -1, bridges, id_)
    return bridges


if __name__ == "__main__":
    import doctest

    doctest.testmod()
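
# Illustrative usage (not part of the original file): find the bridges of
# demo graph 0 defined above.
#
#   bridges = compute_bridges(get_demo_graph(0))
#   print(bridges)  # the cut edges, e.g. (2, 3), (3, 4) and (2, 5), in DFS discovery order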
| 109 | 1 |
"""simple docstring"""
from typing import Any
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , ) -> str:
'''simple docstring'''
_validation(
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , )
# Creates data structures and fill initial step
lowercase_ = {}
lowercase_ = {}
for state in states_space:
lowercase_ = observations_space[0]
lowercase_ = (
initial_probabilities[state] * emission_probabilities[state][observation]
)
lowercase_ = None
# Fills the data structure with the probabilities of
# different transitions and pointers to previous states
for o in range(1 , len(__lowerCAmelCase ) ):
lowercase_ = observations_space[o]
lowercase_ = observations_space[o - 1]
for state in states_space:
# Calculates the argmax for probability function
lowercase_ = """"""
lowercase_ = -1
for k_state in states_space:
lowercase_ = (
probabilities[(k_state, prior_observation)]
* transition_probabilities[k_state][state]
* emission_probabilities[state][observation]
)
if probability > max_probability:
lowercase_ = probability
lowercase_ = k_state
# Update probabilities and pointers dicts
lowercase_ = (
probabilities[(arg_max, prior_observation)]
* transition_probabilities[arg_max][state]
* emission_probabilities[state][observation]
)
lowercase_ = arg_max
# The final observation
lowercase_ = observations_space[len(__lowerCAmelCase ) - 1]
# argmax for given final observation
lowercase_ = """"""
lowercase_ = -1
for k_state in states_space:
lowercase_ = probabilities[(k_state, final_observation)]
if probability > max_probability:
lowercase_ = probability
lowercase_ = k_state
lowercase_ = arg_max
# Process pointers backwards
lowercase_ = last_state
lowercase_ = []
for o in range(len(__lowerCAmelCase ) - 1 , -1 , -1 ):
result.append(__lowerCAmelCase )
lowercase_ = pointers[previous, observations_space[o]]
result.reverse()
return result
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , ) -> Any:
'''simple docstring'''
_validate_not_empty(
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , )
_validate_lists(__lowerCAmelCase , __lowerCAmelCase )
_validate_dicts(
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , ) -> Optional[Any]:
'''simple docstring'''
if not all(
[
observations_space,
states_space,
initial_probabilities,
transition_probabilities,
emission_probabilities,
] ):
raise ValueError("""There's an empty parameter""" )
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase , __lowerCAmelCase ) -> str:
'''simple docstring'''
_validate_list(__lowerCAmelCase , """observations_space""" )
_validate_list(__lowerCAmelCase , """states_space""" )
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase , __lowerCAmelCase ) -> List[str]:
'''simple docstring'''
if not isinstance(_object , __lowerCAmelCase ):
lowercase_ = F'''{var_name} must be a list'''
raise ValueError(__lowerCAmelCase )
else:
for x in _object:
if not isinstance(__lowerCAmelCase , __lowerCAmelCase ):
lowercase_ = F'''{var_name} must be a list of strings'''
raise ValueError(__lowerCAmelCase )
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , ) -> Any:
'''simple docstring'''
_validate_dict(__lowerCAmelCase , """initial_probabilities""" , __lowerCAmelCase )
_validate_nested_dict(__lowerCAmelCase , """transition_probabilities""" )
_validate_nested_dict(__lowerCAmelCase , """emission_probabilities""" )
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase , __lowerCAmelCase ) -> Optional[Any]:
'''simple docstring'''
_validate_dict(_object , __lowerCAmelCase , __lowerCAmelCase )
for x in _object.values():
_validate_dict(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = False ) -> Optional[Any]:
'''simple docstring'''
if not isinstance(_object , __lowerCAmelCase ):
lowercase_ = F'''{var_name} must be a dict'''
raise ValueError(__lowerCAmelCase )
if not all(isinstance(__lowerCAmelCase , __lowerCAmelCase ) for x in _object ):
lowercase_ = F'''{var_name} all keys must be strings'''
raise ValueError(__lowerCAmelCase )
if not all(isinstance(__lowerCAmelCase , __lowerCAmelCase ) for x in _object.values() ):
lowercase_ = """nested dictionary """ if nested else """"""
lowercase_ = F'''{var_name} {nested_text}all values must be {value_type.__name__}'''
raise ValueError(__lowerCAmelCase )
if __name__ == "__main__":
from doctest import testmod
testmod()
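
# Hedged usage sketch (the classic healthy/fever toy HMM, not from the
# original file): decode the most likely state sequence for three symptoms.
if __name__ == "__main__":
    observations = ["normal", "cold", "dizzy"]
    states = ["Healthy", "Fever"]
    start_p = {"Healthy": 0.6, "Fever": 0.4}
    trans_p = {
        "Healthy": {"Healthy": 0.7, "Fever": 0.3},
        "Fever": {"Healthy": 0.4, "Fever": 0.6},
    }
    emit_p = {
        "Healthy": {"normal": 0.5, "cold": 0.4, "dizzy": 0.1},
        "Fever": {"normal": 0.1, "cold": 0.3, "dizzy": 0.6},
    }
    print(viterbi(observations, states, start_p, trans_p, emit_p))
    # expected: ['Healthy', 'Healthy', 'Fever']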
| 136 |
import itertools
import random
import unittest

import numpy as np

from transformers import ASTFeatureExtractor
from transformers.testing_utils import require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available

from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin


global_rng = random.Random()

if is_torch_available():
    import torch


def floats_list(shape, scale=1.0, rng=None, name=None):
    """Creates a random float32 tensor (as nested Python lists)."""
    if rng is None:
        rng = global_rng

    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)

    return values


class ASTFeatureExtractionTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        min_seq_length=400,
        max_seq_length=2000,
        feature_size=1,
        padding_value=0.0,
        sampling_rate=16000,
        return_attention_mask=True,
        do_normalize=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.feature_size = feature_size
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.return_attention_mask = return_attention_mask
        self.do_normalize = do_normalize

    def prepare_feat_extract_dict(self):
        return {
            "feature_size": self.feature_size,
            "padding_value": self.padding_value,
            "sampling_rate": self.sampling_rate,
            "return_attention_mask": self.return_attention_mask,
            "do_normalize": self.do_normalize,
        }

    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))

        if equal_length:
            speech_inputs = floats_list((self.batch_size, self.max_seq_length))
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                _flatten(floats_list((x, self.feature_size)))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]

        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]

        return speech_inputs


@require_torch
@require_torchaudio
class ASTFeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
    feature_extraction_class = ASTFeatureExtractor

    def setUp(self):
        self.feat_extract_tester = ASTFeatureExtractionTester(self)

    def test_call(self):
        # Tests that all call wrap to encode_plus and batch_encode_plus
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        # Test not batched input
        encoded_sequences_1 = feat_extract(speech_inputs[0], return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs[0], return_tensors="np").input_values
        self.assertTrue(np.allclose(encoded_sequences_1, encoded_sequences_2, atol=1e-3))

        # Test batched
        encoded_sequences_1 = feat_extract(speech_inputs, padding=True, return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs, padding=True, return_tensors="np").input_values
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x))[0] for x in (800, 800, 800)]
        np_speech_inputs = np.asarray(speech_inputs)
        encoded_sequences_1 = feat_extract(speech_inputs, return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs, return_tensors="np").input_values
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

    @require_torch
    def test_double_precision_pad(self):
        import torch

        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        np_speech_inputs = np.random.rand(100).astype(np.float64)
        py_speech_inputs = np_speech_inputs.tolist()

        for inputs in [py_speech_inputs, np_speech_inputs]:
            np_processed = feature_extractor.pad([{"input_values": inputs}], return_tensors="np")
            self.assertTrue(np_processed.input_values.dtype == np.float32)
            pt_processed = feature_extractor.pad([{"input_values": inputs}], return_tensors="pt")
            self.assertTrue(pt_processed.input_values.dtype == torch.float32)

    def _load_datasamples(self, num_samples):
        from datasets import load_dataset

        ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        # automatic decoding with librispeech
        speech_samples = ds.sort("id").select(range(num_samples))[:num_samples]["audio"]

        return [x["array"] for x in speech_samples]

    @require_torch
    def test_integration(self):
        # fmt: off
        EXPECTED_INPUT_VALUES = torch.tensor(
            [-0.9894, -1.2776, -0.9066, -1.2776, -0.9349, -1.2609, -1.0386, -1.2776,
             -1.1561, -1.2776, -1.2052, -1.2723, -1.2190, -1.2132, -1.2776, -1.1133,
             -1.1953, -1.1343, -1.1584, -1.2203, -1.1770, -1.2474, -1.2381, -1.1936,
             -0.9270, -0.8317, -0.8049, -0.7706, -0.7565, -0.7869]
        )
        # fmt: on

        input_speech = self._load_datasamples(1)
        feature_extractor = ASTFeatureExtractor()
        input_values = feature_extractor(input_speech, return_tensors="pt").input_values
        self.assertEquals(input_values.shape, (1, 1024, 128))
        self.assertTrue(torch.allclose(input_values[0, 0, :30], EXPECTED_INPUT_VALUES, atol=1e-4))
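
# Hedged usage sketch (random waveform, not from the test file): extract
# log-mel features with the default AST settings.
#
#   import numpy as np
#   from transformers import ASTFeatureExtractor
#
#   fe = ASTFeatureExtractor()
#   waveform = np.random.randn(16000).astype(np.float32)  # 1 s of audio at 16 kHz
#   features = fe(waveform, sampling_rate=16000, return_tensors="np").input_values
#   print(features.shape)  # (1, 1024, 128) -> (batch, time frames, mel bins)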
| 261 | 0 |
import json
import os
from typing import Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
a_ = logging.get_logger(__name__)
a_ = {'vocab_file': 'vocab.json'}
a_ = {
'vocab_file': {
'mgp-str': 'https://huggingface.co/alibaba-damo/mgp-str-base/blob/main/vocab.json',
}
}
a_ = {'mgp-str': 27}
class _lowercase ( lowerCamelCase__ ):
lowercase = VOCAB_FILES_NAMES
lowercase = PRETRAINED_VOCAB_FILES_MAP
lowercase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
def __init__( self : Optional[int] , snake_case : Union[str, Any] , snake_case : str="[GO]" , snake_case : List[Any]="[GO]" , snake_case : Optional[int]="[s]" , snake_case : Any="[GO]" , **snake_case : str ) -> Optional[int]:
"""simple docstring"""
super().__init__(
unk_token=__lowerCamelCase , bos_token=__lowerCamelCase , eos_token=__lowerCamelCase , pad_token=__lowerCamelCase , **__lowerCamelCase , )
with open(__lowerCamelCase , encoding='utf-8' ) as vocab_handle:
UpperCamelCase_ : int = json.load(__lowerCamelCase )
UpperCamelCase_ : Any = {v: k for k, v in self.vocab.items()}
@property
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> Optional[int]:
"""simple docstring"""
return len(self.vocab )
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
return dict(self.vocab , **self.added_tokens_encoder )
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] , snake_case : List[Any] ) -> Union[str, Any]:
"""simple docstring"""
UpperCamelCase_ : List[Any] = []
for s in text:
char_tokens.extend(__lowerCamelCase )
return char_tokens
def SCREAMING_SNAKE_CASE__ ( self : List[Any] , snake_case : List[Any] ) -> List[str]:
"""simple docstring"""
return self.vocab.get(__lowerCamelCase , self.vocab.get(self.unk_token ) )
def SCREAMING_SNAKE_CASE__ ( self : List[Any] , snake_case : str ) -> Any:
"""simple docstring"""
return self.decoder.get(__lowerCamelCase )
def SCREAMING_SNAKE_CASE__ ( self : Any , snake_case : int , snake_case : Any = None ) -> Tuple[str]:
"""simple docstring"""
if not os.path.isdir(__lowerCamelCase ):
logger.error('Vocabulary path ({}) should be a directory'.format(__lowerCamelCase ) )
return
UpperCamelCase_ : Any = os.path.join(
__lowerCamelCase , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
with open(__lowerCamelCase , 'w' , encoding='utf-8' ) as f:
f.write(json.dumps(self.vocab , indent=2 , sort_keys=__lowerCamelCase , ensure_ascii=__lowerCamelCase ) + '\n' )
return (vocab_file,)
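# Standalone sketch of the character-level scheme implemented above (the vocab
# here is illustrative; the real mapping ships as vocab.json with the model):
_demo_vocab = {"[GO]": 0, "a": 1, "b": 2}
_demo_ids = [_demo_vocab.get(ch, _demo_vocab["[GO]"]) for ch in "ab"]
assert _demo_ids == [1, 2]  # one token id per character, unknowns fall back to [GO]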
| 355 |
import copy
import tempfile
import unittest
from transformers import MaMaaaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from transformers.utils import cached_property
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaMaaaForConditionalGeneration, MaMaaaModel, MaMaaaTokenizer
from transformers.models.mam_aaa.modeling_mam_aaa import MaMaaaDecoder, MaMaaaEncoder
def __lowercase ( lowerCamelCase : str , lowerCamelCase : Tuple , lowerCamelCase : Optional[int] , lowerCamelCase : str=None , lowerCamelCase : Dict=None , lowerCamelCase : Optional[int]=None , lowerCamelCase : Dict=None , lowerCamelCase : Optional[int]=None , ):
if attention_mask is None:
UpperCamelCase_ : int = input_ids.ne(config.pad_token_id )
if decoder_attention_mask is None:
UpperCamelCase_ : Dict = decoder_input_ids.ne(config.pad_token_id )
if head_mask is None:
UpperCamelCase_ : Optional[Any] = torch.ones(config.encoder_layers , config.encoder_attention_heads , device=lowerCamelCase )
if decoder_head_mask is None:
UpperCamelCase_ : int = torch.ones(config.decoder_layers , config.decoder_attention_heads , device=lowerCamelCase )
if cross_attn_head_mask is None:
UpperCamelCase_ : Tuple = torch.ones(config.decoder_layers , config.decoder_attention_heads , device=lowerCamelCase )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
class _lowercase :
def __init__( self : Union[str, Any] , snake_case : str , snake_case : str=1_3 , snake_case : Optional[int]=7 , snake_case : int=True , snake_case : str=False , snake_case : str=9_9 , snake_case : int=1_6 , snake_case : str=2 , snake_case : Dict=4 , snake_case : Tuple=4 , snake_case : List[Any]="relu" , snake_case : str=0.1 , snake_case : Any=0.1 , snake_case : List[str]=0.0 , snake_case : int=0.0 , snake_case : Any=2_0 , snake_case : Union[str, Any]=2 , snake_case : Tuple=1 , snake_case : Optional[int]=0 , ) -> int:
"""simple docstring"""
UpperCamelCase_ : Tuple = parent
UpperCamelCase_ : Optional[Any] = batch_size
UpperCamelCase_ : Tuple = seq_length
UpperCamelCase_ : Dict = is_training
UpperCamelCase_ : Tuple = use_labels
UpperCamelCase_ : Tuple = vocab_size
UpperCamelCase_ : List[str] = hidden_size
UpperCamelCase_ : List[str] = num_hidden_layers
UpperCamelCase_ : Tuple = num_attention_heads
UpperCamelCase_ : Dict = intermediate_size
UpperCamelCase_ : Dict = hidden_act
UpperCamelCase_ : int = hidden_dropout_prob
UpperCamelCase_ : str = attention_probs_dropout_prob
UpperCamelCase_ : List[Any] = encoder_layerdrop
UpperCamelCase_ : Any = decoder_layerdrop
UpperCamelCase_ : Tuple = max_position_embeddings
UpperCamelCase_ : Dict = eos_token_id
UpperCamelCase_ : int = pad_token_id
UpperCamelCase_ : str = bos_token_id
def SCREAMING_SNAKE_CASE__ ( self : str ) -> Optional[int]:
"""simple docstring"""
UpperCamelCase_ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCamelCase_ : Any = self.eos_token_id # Eos Token
UpperCamelCase_ : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        # we need to clamp the input ids here to avoid having pad tokens in between
        # this is because for M2M100 the position_ids are prepared such that
        # all pad tokens have pos id = 2 and the rest are between 2..seq_length,
        # and the seq_length here is seq_length - num_pad_tokens
        # but when using past, there is no way of knowing if the past input ids had
        # pad tokens in them, which results in an incorrect seq_length and in turn
        # in position_ids being off by num_pad_tokens in the past input
UpperCamelCase_ : str = input_ids.clamp(self.pad_token_id + 1 )
UpperCamelCase_ : List[Any] = decoder_input_ids.clamp(self.pad_token_id + 1 )
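        # e.g. with pad_token_id = 1, clamp(self.pad_token_id + 1) lifts every id
        # below 2 up to 2, so no pad ids remain inside the sequence:
        # tensor([[1, 5, 0]]).clamp(2) -> tensor([[2, 5, 2]])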
UpperCamelCase_ : str = self.get_config()
UpperCamelCase_ : Any = prepare_mam_aaa_inputs_dict(snake_case , snake_case , snake_case )
return config, inputs_dict
def SCREAMING_SNAKE_CASE__ ( self : str ) -> int:
"""simple docstring"""
return MaMaaaConfig(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , encoder_layerdrop=self.encoder_layerdrop , decoder_layerdrop=self.decoder_layerdrop , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , )
def SCREAMING_SNAKE_CASE__ ( self : Any ) -> List[str]:
"""simple docstring"""
UpperCamelCase_, UpperCamelCase_ : List[str] = self.prepare_config_and_inputs()
return config, inputs_dict
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] , snake_case : List[Any] , snake_case : Optional[int] ) -> Dict:
"""simple docstring"""
UpperCamelCase_ : str = MaMaaaModel(config=snake_case ).get_decoder().to(snake_case ).eval()
UpperCamelCase_ : str = inputs_dict['input_ids']
UpperCamelCase_ : Any = inputs_dict['attention_mask']
UpperCamelCase_ : Optional[int] = inputs_dict['head_mask']
# first forward pass
UpperCamelCase_ : int = model(snake_case , attention_mask=snake_case , head_mask=snake_case , use_cache=snake_case )
UpperCamelCase_, UpperCamelCase_ : int = outputs.to_tuple()
        # create hypothetical multiple next tokens and extend next_input_ids
UpperCamelCase_ : int = ids_tensor((self.batch_size, 3) , config.vocab_size )
UpperCamelCase_ : Optional[int] = ids_tensor((self.batch_size, 3) , 2 )
        # append to next input_ids and attention_mask
UpperCamelCase_ : Tuple = torch.cat([input_ids, next_tokens] , dim=-1 )
UpperCamelCase_ : Optional[Any] = torch.cat([attention_mask, next_attn_mask] , dim=-1 )
UpperCamelCase_ : Union[str, Any] = model(snake_case , attention_mask=snake_case )['last_hidden_state']
UpperCamelCase_ : int = model(snake_case , attention_mask=snake_case , past_key_values=snake_case )[
'last_hidden_state'
]
# select random slice
UpperCamelCase_ : List[str] = ids_tensor((1,) , output_from_past.shape[-1] ).item()
UpperCamelCase_ : Tuple = output_from_no_past[:, -3:, random_slice_idx].detach()
UpperCamelCase_ : List[str] = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(snake_case , snake_case , atol=1e-2 ) )
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] , snake_case : int , snake_case : str ) -> Dict:
"""simple docstring"""
UpperCamelCase_ : Tuple = MaMaaaModel(config=snake_case ).to(snake_case ).eval()
UpperCamelCase_ : List[str] = model(**snake_case )
UpperCamelCase_ : List[Any] = outputs.encoder_last_hidden_state
UpperCamelCase_ : Optional[int] = outputs.last_hidden_state
with tempfile.TemporaryDirectory() as tmpdirname:
UpperCamelCase_ : Optional[int] = model.get_encoder()
encoder.save_pretrained(snake_case )
UpperCamelCase_ : Tuple = MaMaaaEncoder.from_pretrained(snake_case ).to(snake_case )
UpperCamelCase_ : Optional[Any] = encoder(inputs_dict['input_ids'] , attention_mask=inputs_dict['attention_mask'] )[
0
]
self.parent.assertTrue((encoder_last_hidden_state_a - encoder_last_hidden_state).abs().max().item() < 1e-3 )
with tempfile.TemporaryDirectory() as tmpdirname:
UpperCamelCase_ : int = model.get_decoder()
decoder.save_pretrained(snake_case )
UpperCamelCase_ : int = MaMaaaDecoder.from_pretrained(snake_case ).to(snake_case )
UpperCamelCase_ : int = decoder(
input_ids=inputs_dict['decoder_input_ids'] , attention_mask=inputs_dict['decoder_attention_mask'] , encoder_hidden_states=snake_case , encoder_attention_mask=inputs_dict['attention_mask'] , )[0]
self.parent.assertTrue((last_hidden_state_a - last_hidden_state).abs().max().item() < 1e-3 )
@require_torch
class _lowercase ( snake_case_ , snake_case_ , snake_case_ , unittest.TestCase ):
lowercase = (
(
MaMaaaModel,
MaMaaaForConditionalGeneration,
)
if is_torch_available()
else ()
)
lowercase = (MaMaaaForConditionalGeneration,) if is_torch_available() else ()
lowercase = (
{
'conversational': MaMaaaForConditionalGeneration,
'feature-extraction': MaMaaaModel,
'summarization': MaMaaaForConditionalGeneration,
'text2text-generation': MaMaaaForConditionalGeneration,
'translation': MaMaaaForConditionalGeneration,
}
if is_torch_available()
else {}
)
lowercase = True
lowercase = True
lowercase = False
lowercase = False
def SCREAMING_SNAKE_CASE__ ( self : List[Any] , snake_case : Union[str, Any] , snake_case : List[Any] , snake_case : str , snake_case : str , snake_case : Dict ) -> List[Any]:
"""simple docstring"""
if pipeline_test_casse_name == "TranslationPipelineTests":
# Get `ValueError: Translation requires a `src_lang` and a `tgt_lang` for this model`.
# `M2M100Config` was never used in pipeline tests: cannot create a simple tokenizer.
return True
return False
def SCREAMING_SNAKE_CASE__ ( self : Dict ) -> Optional[int]:
"""simple docstring"""
UpperCamelCase_ : Tuple = MaMaaaModelTester(self )
UpperCamelCase_ : Optional[Any] = ConfigTester(self , config_class=snake_case )
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
self.config_tester.run_common_tests()
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
UpperCamelCase_, UpperCamelCase_ : str = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
UpperCamelCase_ : int = model_class(snake_case )
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(snake_case )
UpperCamelCase_, UpperCamelCase_ : str = model_class.from_pretrained(snake_case , output_loading_info=snake_case )
self.assertEqual(info['missing_keys'] , [] )
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> Tuple:
"""simple docstring"""
UpperCamelCase_ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_decoder_model_past_large_inputs(*snake_case )
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ) -> List[str]:
"""simple docstring"""
UpperCamelCase_ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_encoder_decoder_model_standalone(*snake_case )
def SCREAMING_SNAKE_CASE__ ( self : List[str] ) -> Tuple:
"""simple docstring"""
UpperCamelCase_, UpperCamelCase_ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in (MaMaaaModel, MaMaaaForConditionalGeneration):
UpperCamelCase_ : Optional[Any] = model_class(snake_case )
model.to(snake_case )
model.eval()
UpperCamelCase_ : List[Any] = copy.deepcopy(self._prepare_for_class(snake_case , snake_case ) )
if not self.is_encoder_decoder:
UpperCamelCase_ : List[Any] = inputs['input_ids']
del inputs["input_ids"]
else:
UpperCamelCase_ : str = inputs['input_ids']
UpperCamelCase_ : List[str] = inputs.get('decoder_input_ids' , snake_case )
del inputs["input_ids"]
inputs.pop('decoder_input_ids' , snake_case )
UpperCamelCase_ : List[str] = model.get_input_embeddings()
if not self.is_encoder_decoder:
UpperCamelCase_ : Tuple = wte(snake_case )
else:
UpperCamelCase_ : Optional[int] = wte(snake_case )
UpperCamelCase_ : Optional[int] = wte(snake_case )
with torch.no_grad():
model(**snake_case )[0]
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ) -> List[str]:
"""simple docstring"""
UpperCamelCase_, UpperCamelCase_ : int = self.model_tester.prepare_config_and_inputs()
UpperCamelCase_ : str = input_dict['input_ids']
UpperCamelCase_ : int = input_ids.ne(1 ).to(snake_case )
UpperCamelCase_ : Dict = MaMaaaForConditionalGeneration(snake_case ).eval().to(snake_case )
if torch_device == "cuda":
model.half()
model.generate(snake_case , attention_mask=snake_case )
model.generate(num_beams=4 , do_sample=snake_case , early_stopping=snake_case , num_return_sequences=3 )
def __lowercase ( lowerCamelCase : List[Any] ):
return torch.tensor(lowerCamelCase , dtype=torch.long , device=lowerCamelCase )
a_ = 1E-4
@require_torch
@require_sentencepiece
@require_tokenizers
@slow
class _lowercase ( unittest.TestCase ):
@cached_property
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> int:
"""simple docstring"""
return MaMaaaTokenizer.from_pretrained('facebook/m2m100_418M' )
def SCREAMING_SNAKE_CASE__ ( self : str ) -> Union[str, Any]:
"""simple docstring"""
UpperCamelCase_ : Union[str, Any] = MaMaaaModel.from_pretrained('facebook/m2m100_418M' ).to(snake_case )
UpperCamelCase_ : str = _long_tensor([[1_2_8_0_2_8, 9_8, 1_2, 3_0_5_2_7, 2_7_3_2, 1_5_9, 7_7_5_5, 6_1_9_0_4, 3_9_1_4_4, 3_8, 2]] )
UpperCamelCase_ : Dict = _long_tensor([[2, 1_2_8_0_2_8, 9_8, 1_2, 3_0_5_2_7, 2_7_3_2, 1_5_9, 7_7_5_5, 6_1_9_0_4, 3_9_1_4_4, 3_8]] )
UpperCamelCase_ : Optional[int] = prepare_mam_aaa_inputs_dict(model.config , snake_case , snake_case )
with torch.no_grad():
UpperCamelCase_ : Any = model(**snake_case )[0]
UpperCamelCase_ : Tuple = torch.Size((1, 1_1, 1_0_2_4) )
self.assertEqual(output.shape , snake_case )
# change to expected output here
UpperCamelCase_ : Dict = torch.tensor(
[[-0.7780, -0.1676, 0.1038], [-6.7556, -1.3992, 0.0567], [-7.5383, -0.5920, -0.2779]] , device=snake_case )
self.assertTrue(torch.allclose(output[:, :3, :3] , snake_case , atol=snake_case ) )
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ) -> str:
"""simple docstring"""
UpperCamelCase_ : List[str] = MaMaaaForConditionalGeneration.from_pretrained('facebook/m2m100_418M' ).to(snake_case )
# change to intended input
UpperCamelCase_ : Tuple = _long_tensor([[1_2_8_0_2_8, 9_8, 1_2, 3_0_5_2_7, 2_7_3_2, 1_5_9, 7_7_5_5, 6_1_9_0_4, 3_9_1_4_4, 3_8, 2]] )
UpperCamelCase_ : Union[str, Any] = _long_tensor([[2, 1_2_8_0_2_8, 9_8, 1_2, 3_0_5_2_7, 2_7_3_2, 1_5_9, 7_7_5_5, 6_1_9_0_4, 3_9_1_4_4, 3_8]] )
UpperCamelCase_ : Dict = prepare_mam_aaa_inputs_dict(model.config , snake_case , snake_case )
with torch.no_grad():
UpperCamelCase_ : Dict = model(**snake_case )[0]
UpperCamelCase_ : Union[str, Any] = torch.Size((1, 1_1, model.config.vocab_size) )
self.assertEqual(output.shape , snake_case )
# change to expected output here
UpperCamelCase_ : Any = torch.tensor(
[[-1.0448, -1.0411, 3.7992], [-3.2191, -3.2386, -1.3451], [-3.6210, -3.5993, 0.4925]] , device=snake_case )
self.assertTrue(torch.allclose(output[:, :3, :3] , snake_case , atol=snake_case ) )
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ) -> int:
"""simple docstring"""
UpperCamelCase_ : str = MaMaaaForConditionalGeneration.from_pretrained('facebook/m2m100_418M' ).to(snake_case )
UpperCamelCase_ : Optional[int] = MaMaaaTokenizer.from_pretrained('facebook/m2m100_418M' , src_lang='fr' , tgt_lang='en' )
UpperCamelCase_ : Union[str, Any] = [
'L\'affaire NSA souligne l\'absence totale de débat sur le renseignement',
'Selon moi, il y a deux niveaux de réponse de la part du gouvernement français.',
'Lorsque François Hollande téléphone à Barack Obama ou quand le ministre des affaires étrangères Laurent'
' Fabius convoque l\'ambassadeur des Etats-Unis, ils réagissent à une vraie découverte, qui est celle de'
' l\'ampleur de la surveillance américaine sur l\'ensemble des communications en France.',
]
# The below article tests that we don't add any hypotheses outside of the top n_beams
UpperCamelCase_ : Optional[Any] = tokenizer(snake_case , padding=snake_case , return_tensors='pt' )
UpperCamelCase_ : Dict = model.generate(
input_ids=dct['input_ids'].to(snake_case ) , attention_mask=dct['attention_mask'].to(snake_case ) , num_beams=5 , forced_bos_token_id=tokenizer.get_lang_id('en' ) , )
UpperCamelCase_ : Optional[int] = [
'The NSA case highlights the total absence of intelligence debate',
'I think there are two levels of response from the French government.',
'When François Hollande calls Barack Obama or when Foreign Minister Laurent Fabius calls the U.S.'
' Ambassador, they respond to a real discovery, which is that of the scale of U.S. surveillance on all'
' communications in France.',
]
UpperCamelCase_ : List[str] = tokenizer.batch_decode(
hypotheses_batch.tolist() , clean_up_tokenization_spaces=snake_case , skip_special_tokens=snake_case )
assert generated == expected_en
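        # (The run above follows the documented M2M100 translation recipe: build the
        # tokenizer with src_lang set, then generate with
        # forced_bos_token_id=tokenizer.get_lang_id(tgt_lang) so the decoder starts
        # from the target-language token.)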
| 50 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__SCREAMING_SNAKE_CASE : Any = {
"""configuration_time_series_transformer""": [
"""TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""TimeSeriesTransformerConfig""",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__SCREAMING_SNAKE_CASE : Optional[int] = [
"""TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TimeSeriesTransformerForPrediction""",
"""TimeSeriesTransformerModel""",
"""TimeSeriesTransformerPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_time_series_transformer import (
TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TimeSeriesTransformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_time_series_transformer import (
TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TimeSeriesTransformerForPrediction,
TimeSeriesTransformerModel,
TimeSeriesTransformerPreTrainedModel,
)
else:
import sys
__SCREAMING_SNAKE_CASE : int = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 31 |
import unittest
from transformers.utils.backbone_utils import (
BackboneMixin,
get_aligned_output_features_output_indices,
verify_out_features_out_indices,
)
class A (unittest.TestCase ):
'''simple docstring'''
def a_ ( self : Any ) -> Union[str, Any]:
"""simple docstring"""
A__ = ["""a""", """b""", """c"""]
# Defaults to last layer if both are None
A__ , A__ = get_aligned_output_features_output_indices(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
self.assertEqual(__lowerCAmelCase , ["""c"""] )
self.assertEqual(__lowerCAmelCase , [2] )
# Out indices set to match out features
A__ , A__ = get_aligned_output_features_output_indices(["""a""", """c"""] , __lowerCAmelCase , __lowerCAmelCase )
self.assertEqual(__lowerCAmelCase , ["""a""", """c"""] )
self.assertEqual(__lowerCAmelCase , [0, 2] )
# Out features set to match out indices
A__ , A__ = get_aligned_output_features_output_indices(__lowerCAmelCase , [0, 2] , __lowerCAmelCase )
self.assertEqual(__lowerCAmelCase , ["""a""", """c"""] )
self.assertEqual(__lowerCAmelCase , [0, 2] )
# Out features selected from negative indices
A__ , A__ = get_aligned_output_features_output_indices(__lowerCAmelCase , [-3, -1] , __lowerCAmelCase )
self.assertEqual(__lowerCAmelCase , ["""a""", """c"""] )
self.assertEqual(__lowerCAmelCase , [-3, -1] )
def a_ ( self : Optional[Any] ) -> Optional[Any]:
"""simple docstring"""
with self.assertRaises(__lowerCAmelCase ):
verify_out_features_out_indices(["""a""", """b"""] , (0, 1) , __lowerCAmelCase )
# Out features must be a list
with self.assertRaises(__lowerCAmelCase ):
verify_out_features_out_indices(("""a""", """b""") , (0, 1) , ["""a""", """b"""] )
# Out features must be a subset of stage names
with self.assertRaises(__lowerCAmelCase ):
verify_out_features_out_indices(["""a""", """b"""] , (0, 1) , ["""a"""] )
# Out indices must be a list or tuple
with self.assertRaises(__lowerCAmelCase ):
verify_out_features_out_indices(__lowerCAmelCase , 0 , ["""a""", """b"""] )
# Out indices must be a subset of stage names
with self.assertRaises(__lowerCAmelCase ):
verify_out_features_out_indices(__lowerCAmelCase , (0, 1) , ["""a"""] )
# Out features and out indices must be the same length
with self.assertRaises(__lowerCAmelCase ):
verify_out_features_out_indices(["""a""", """b"""] , (0,) , ["""a""", """b""", """c"""] )
# Out features should match out indices
with self.assertRaises(__lowerCAmelCase ):
verify_out_features_out_indices(["""a""", """b"""] , (0, 2) , ["""a""", """b""", """c"""] )
# Out features and out indices should be in order
with self.assertRaises(__lowerCAmelCase ):
verify_out_features_out_indices(["""b""", """a"""] , (0, 1) , ["""a""", """b"""] )
# Check passes with valid inputs
verify_out_features_out_indices(["""a""", """b""", """d"""] , (0, 1, -1) , ["""a""", """b""", """c""", """d"""] )
def a_ ( self : Union[str, Any] ) -> List[str]:
"""simple docstring"""
A__ = BackboneMixin()
A__ = ["""a""", """b""", """c"""]
A__ = ["""a""", """c"""]
A__ = [0, 2]
# Check that the output features and indices are set correctly
self.assertEqual(backbone.out_features , ["""a""", """c"""] )
self.assertEqual(backbone.out_indices , [0, 2] )
# Check out features and indices are updated correctly
A__ = ["""a""", """b"""]
self.assertEqual(backbone.out_features , ["""a""", """b"""] )
self.assertEqual(backbone.out_indices , [0, 1] )
A__ = [-3, -1]
self.assertEqual(backbone.out_features , ["""a""", """c"""] )
self.assertEqual(backbone.out_indices , [-3, -1] )
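# Standalone sketch of the alignment rule exercised above: out_features and
# out_indices must select the same stages, and negative indices count from the end.
_stages = ["a", "b", "c"]
assert [_stages[i] for i in [-3, -1]] == ["a", "c"]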
| 274 | 0 |
"""simple docstring"""
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A_ = logging.get_logger(__name__)
A_ = {
'''microsoft/wavlm-base''': '''https://huggingface.co/microsoft/wavlm-base/resolve/main/config.json''',
# See all WavLM models at https://huggingface.co/models?filter=wavlm
}
class __SCREAMING_SNAKE_CASE ( UpperCamelCase ):
snake_case_ = 'wavlm'
def __init__( self : int , snake_case : Dict=32 , snake_case : Optional[Any]=768 , snake_case : Optional[int]=12 , snake_case : str=12 , snake_case : str=3072 , snake_case : Union[str, Any]="gelu" , snake_case : List[str]=0.1 , snake_case : Tuple=0.1 , snake_case : Dict=0.1 , snake_case : List[Any]=0.0 , snake_case : Optional[Any]=0.1 , snake_case : Optional[int]=0.1 , snake_case : int=0.02 , snake_case : Dict=1e-5 , snake_case : Optional[Any]="group" , snake_case : Any="gelu" , snake_case : List[str]=(512, 512, 512, 512, 512, 512, 512) , snake_case : Optional[Any]=(5, 2, 2, 2, 2, 2, 2) , snake_case : List[str]=(10, 3, 3, 3, 3, 2, 2) , snake_case : List[str]=False , snake_case : Optional[Any]=128 , snake_case : int=16 , snake_case : List[str]=320 , snake_case : Dict=800 , snake_case : Optional[Any]=False , snake_case : Union[str, Any]=True , snake_case : str=0.05 , snake_case : Tuple=10 , snake_case : List[Any]=2 , snake_case : Any=0.0 , snake_case : int=10 , snake_case : str=320 , snake_case : Optional[Any]=2 , snake_case : List[Any]=0.1 , snake_case : Union[str, Any]=100 , snake_case : Optional[int]=256 , snake_case : List[str]=256 , snake_case : Optional[int]=0.1 , snake_case : Tuple="mean" , snake_case : Any=False , snake_case : Optional[int]=False , snake_case : List[str]=256 , snake_case : int=(512, 512, 512, 512, 1500) , snake_case : str=(5, 3, 3, 1, 1) , snake_case : Optional[Any]=(1, 2, 3, 1, 1) , snake_case : int=512 , snake_case : Any=80 , snake_case : Any=0 , snake_case : Dict=1 , snake_case : Dict=2 , snake_case : Optional[Any]=False , snake_case : Any=3 , snake_case : str=2 , snake_case : Optional[Any]=3 , snake_case : str=None , **snake_case : Tuple , ):
'''simple docstring'''
super().__init__(**snake_case , pad_token_id=snake_case , bos_token_id=snake_case , eos_token_id=snake_case )
A__ : Any = hidden_size
A__ : List[Any] = feat_extract_norm
A__ : Any = feat_extract_activation
A__ : Dict = list(snake_case )
A__ : int = list(snake_case )
A__ : Union[str, Any] = list(snake_case )
A__ : int = conv_bias
A__ : Optional[int] = num_buckets
A__ : List[str] = max_bucket_distance
A__ : Any = num_conv_pos_embeddings
A__ : int = num_conv_pos_embedding_groups
A__ : Union[str, Any] = len(self.conv_dim )
A__ : Tuple = num_hidden_layers
A__ : Dict = intermediate_size
A__ : Union[str, Any] = hidden_act
A__ : Union[str, Any] = num_attention_heads
A__ : List[Any] = hidden_dropout
A__ : List[Any] = attention_dropout
A__ : Optional[int] = activation_dropout
A__ : Any = feat_proj_dropout
A__ : str = final_dropout
A__ : str = layerdrop
A__ : Optional[int] = layer_norm_eps
A__ : List[str] = initializer_range
A__ : str = num_ctc_classes
A__ : List[str] = vocab_size
A__ : List[str] = do_stable_layer_norm
A__ : Optional[Any] = use_weighted_layer_sum
A__ : Dict = classifier_proj_size
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
"""Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="""
""" `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="""
F' {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,'
F' `len(config.conv_kernel) = {len(self.conv_kernel )}`.' )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
A__ : Union[str, Any] = apply_spec_augment
A__ : List[Any] = mask_time_prob
A__ : str = mask_time_length
A__ : str = mask_time_min_masks
A__ : List[str] = mask_feature_prob
A__ : Union[str, Any] = mask_feature_length
# parameters for pretraining with codevector quantized representations
A__ : int = num_codevectors_per_group
A__ : Any = num_codevector_groups
A__ : Tuple = contrastive_logits_temperature
A__ : str = num_negatives
A__ : Optional[Any] = codevector_dim
A__ : List[Any] = proj_codevector_dim
A__ : Tuple = diversity_loss_weight
# ctc loss
A__ : int = ctc_loss_reduction
A__ : Union[str, Any] = ctc_zero_infinity
# adapter
A__ : List[str] = add_adapter
A__ : Dict = adapter_kernel_size
A__ : Optional[int] = adapter_stride
A__ : Optional[int] = num_adapter_layers
A__ : str = output_hidden_size or hidden_size
# SequenceClassification-specific parameter. Feel free to ignore for other classes.
A__ : Dict = classifier_proj_size
# XVector-specific parameters. Feel free to ignore for other classes.
A__ : str = list(snake_case )
A__ : Union[str, Any] = list(snake_case )
A__ : Tuple = list(snake_case )
A__ : Tuple = xvector_output_dim
@property
def _UpperCamelCase ( self : Optional[Any] ):
'''simple docstring'''
return functools.reduce(operator.mul , self.conv_stride , 1 )
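        # e.g. with the default conv_stride (5, 2, 2, 2, 2, 2, 2) this property
        # evaluates to functools.reduce(operator.mul, (5, 2, 2, 2, 2, 2, 2), 1) == 320,
        # i.e. one extracted frame per 320 input samples.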
| 296 |
"""simple docstring"""
import re
from flax.core.frozen_dict import freeze
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.experimental import PartitionSpec as P
# Sentinels
A_ = object()
# For specifying empty leaf dict `{}`
A_ = object()
def _lowerCAmelCase ( UpperCAmelCase__ : Tuple, UpperCAmelCase__ : List[Any] ) ->Dict:
A__ : Union[str, Any] = tuple((re.compile(x + """$""" ) for x in qs) )
for i in range(len(UpperCAmelCase__ ) - len(UpperCAmelCase__ ) + 1 ):
A__ : Optional[Any] = [x.match(UpperCAmelCase__ ) for x, y in zip(UpperCAmelCase__, ks[i:] )]
if matches and all(UpperCAmelCase__ ):
return True
return False
def _lowerCAmelCase ( UpperCAmelCase__ : List[Any] ) ->Dict:
def replace(UpperCAmelCase__ : int, UpperCAmelCase__ : List[str] ):
for rule, replacement in rules:
if _match(UpperCAmelCase__, UpperCAmelCase__ ):
return replacement
return val
return replace
def _lowerCAmelCase ( ) ->Tuple:
return [
# embeddings
(("transformer", "wpe", "embedding"), P("""mp""", UpperCAmelCase__ )),
(("transformer", "wte", "embedding"), P("""mp""", UpperCAmelCase__ )),
        # attention
(("attention", "(q_proj|k_proj|v_proj)", "kernel"), P(UpperCAmelCase__, """mp""" )),
(("attention", "out_proj", "kernel"), P("""mp""", UpperCAmelCase__ )),
(("attention", "out_proj", "bias"), None),
# mlp
(("mlp", "c_fc", "kernel"), P(UpperCAmelCase__, """mp""" )),
(("mlp", "c_fc", "bias"), P("""mp""" )),
(("mlp", "c_proj", "kernel"), P("""mp""", UpperCAmelCase__ )),
(("mlp", "c_proj", "bias"), None),
# layer norms
((r"ln_\d+", "bias"), None),
((r"\d+", r"ln_\d+", "scale"), None),
(("ln_f", "bias"), None),
(("ln_f", "scale"), None),
]
def _lowerCAmelCase ( UpperCAmelCase__ : Tuple ) ->Any:
A__ : Union[str, Any] = _get_partition_rules()
A__ : int = _replacement_rules(UpperCAmelCase__ )
A__ : Tuple = {k: _unmatched for k in flatten_dict(UpperCAmelCase__ )}
A__ : Optional[int] = {k: replace(UpperCAmelCase__, UpperCAmelCase__ ) for k, v in initd.items()}
assert _unmatched not in result.values(), "Incomplete partition spec."
return freeze(unflatten_dict(UpperCAmelCase__ ) )
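# Standalone sketch of the windowed tuple-of-regexes match used above (names here
# are illustrative; only the standard library is assumed):
import re

def _demo_match(query_patterns, flat_key):
    # A rule like ("mlp", "c_fc", "kernel") matches any contiguous window of the
    # flattened parameter key, each pattern anchored with a trailing "$".
    qs = tuple(re.compile(x + "$") for x in query_patterns)
    return any(
        all(q.match(k) for q, k in zip(qs, flat_key[i:]))
        for i in range(len(flat_key) - len(qs) + 1)
    )

assert _demo_match(("mlp", "c_fc", "kernel"), ("transformer", "h", "0", "mlp", "c_fc", "kernel"))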
| 296 | 1 |
"""simple docstring"""
from importlib import import_module
from .logging import get_logger
lowercase_ = get_logger(__name__)
class __lowerCAmelCase :
'''simple docstring'''
def __init__( self , _a , _a=None ):
__a = attrs or []
if module is not None:
for key in module.__dict__:
if key in attrs or not key.startswith('''__''' ):
setattr(self , _a , getattr(_a , _a ) )
__a = module._original_module if isinstance(_a , _PatchedModuleObj ) else module
class __lowerCAmelCase :
'''simple docstring'''
__UpperCAmelCase : int = []
def __init__( self , _a , _a , _a , _a=None ):
__a = obj
__a = target
__a = new
__a = target.split('''.''' )[0]
__a = {}
__a = attrs or []
def __enter__( self ):
*__a , __a = self.target.split('''.''' )
# Patch modules:
# it's used to patch attributes of submodules like "os.path.join";
# in this case we need to patch "os" and "os.path"
for i in range(len(_a ) ):
try:
__a = import_module('''.'''.join(submodules[: i + 1] ) )
except ModuleNotFoundError:
continue
# We iterate over all the globals in self.obj in case we find "os" or "os.path"
for attr in self.obj.__dir__():
__a = getattr(self.obj , _a )
# We don't check for the name of the global, but rather if its value *is* "os" or "os.path".
# This allows to patch renamed modules like "from os import path as ospath".
if obj_attr is submodule or (
(isinstance(_a , _PatchedModuleObj ) and obj_attr._original_module is submodule)
):
__a = obj_attr
# patch at top level
setattr(self.obj , _a , _PatchedModuleObj(_a , attrs=self.attrs ) )
__a = getattr(self.obj , _a )
# construct lower levels patches
for key in submodules[i + 1 :]:
setattr(_a , _a , _PatchedModuleObj(getattr(_a , _a , _a ) , attrs=self.attrs ) )
__a = getattr(_a , _a )
# finally set the target attribute
setattr(_a , _a , self.new )
# Patch attribute itself:
# it's used for builtins like "open",
        # and also, to patch "os.path.join", we may need to patch "join"
# itself if it was imported as "from os.path import join".
if submodules: # if it's an attribute of a submodule like "os.path.join"
try:
__a = getattr(import_module('''.'''.join(_a ) ) , _a )
except (AttributeError, ModuleNotFoundError):
return
# We iterate over all the globals in self.obj in case we find "os.path.join"
for attr in self.obj.__dir__():
# We don't check for the name of the global, but rather if its value *is* "os.path.join".
# This allows to patch renamed attributes like "from os.path import join as pjoin".
if getattr(self.obj , _a ) is attr_value:
__a = getattr(self.obj , _a )
setattr(self.obj , _a , self.new )
        elif target_attr in globals()["__builtins__"]: # if it's a builtin like "open"
__a = globals()['''__builtins__'''][target_attr]
setattr(self.obj , _a , self.new )
else:
raise RuntimeError(f'''Tried to patch attribute {target_attr} instead of a submodule.''' )
def __exit__( self , *_a ):
for attr in list(self.original ):
setattr(self.obj , _a , self.original.pop(_a ) )
def __UpperCAmelCase ( self ):
self.__enter__()
self._active_patches.append(self )
def __UpperCAmelCase ( self ):
try:
self._active_patches.remove(self )
except ValueError:
# If the patch hasn't been started this will fail
return None
return self.__exit__()
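# Illustrative usage of the patcher above (comment-only, since the class names in
# this snippet have been rewritten): given a module `mod` that did
# `from os.path import join as pjoin`, the pattern is
#     with Patcher(mod, "os.path.join", mock_join):
#         mod.pjoin("a", "b")  # dispatches to mock_join
# and start()/stop() provide the same patching without a with-block.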
| 45 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
__lowerCamelCase : Union[str, Any] = {
'''configuration_chinese_clip''': [
'''CHINESE_CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''ChineseCLIPConfig''',
'''ChineseCLIPOnnxConfig''',
'''ChineseCLIPTextConfig''',
'''ChineseCLIPVisionConfig''',
],
'''processing_chinese_clip''': ['''ChineseCLIPProcessor'''],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase : Any = ['''ChineseCLIPFeatureExtractor''']
__lowerCamelCase : Optional[int] = ['''ChineseCLIPImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase : int = [
'''CHINESE_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ChineseCLIPModel''',
'''ChineseCLIPPreTrainedModel''',
'''ChineseCLIPTextModel''',
'''ChineseCLIPVisionModel''',
]
if TYPE_CHECKING:
from .configuration_chinese_clip import (
CHINESE_CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
ChineseCLIPConfig,
ChineseCLIPOnnxConfig,
ChineseCLIPTextConfig,
ChineseCLIPVisionConfig,
)
from .processing_chinese_clip import ChineseCLIPProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_chinese_clip import ChineseCLIPFeatureExtractor, ChineseCLIPImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_chinese_clip import (
CHINESE_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
ChineseCLIPModel,
ChineseCLIPPreTrainedModel,
ChineseCLIPTextModel,
ChineseCLIPVisionModel,
)
else:
import sys
__lowerCamelCase : Any = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 18 | 0 |
'''simple docstring'''
import requests
__lowercase : List[str] = 'https://newsapi.org/v1/articles?source=bbc-news&sortBy=top&apiKey='
def lowerCamelCase (_SCREAMING_SNAKE_CASE : str ):
# fetching a list of articles in json format
__a : List[str] = requests.get(_NEWS_API + bbc_news_api_key ).json()
# each article in the list is a dict
for i, article in enumerate(bbc_news_page['articles'] , 1 ):
print(F"""{i}.) {article["title"]}""" )
if __name__ == "__main__":
fetch_bbc_news(bbc_news_api_key='<Your BBC News API key goes here>')
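# For reference, the v1 response iterated above is shaped roughly like this
# (field names per the newsapi.org docs; values illustrative):
#   {"articles": [{"title": "...", "description": "...", "url": "..."}, ...]}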
| 294 |
'''simple docstring'''
import json
import os
import re
import unittest
from transformers import CodeGenTokenizer, CodeGenTokenizerFast
from transformers.models.codegen.tokenization_codegen import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class __UpperCamelCase ( lowerCAmelCase_ , unittest.TestCase ):
A_ = CodeGenTokenizer
A_ = CodeGenTokenizerFast
A_ = True
A_ = {"add_prefix_space": True}
A_ = False
def __UpperCAmelCase ( self ):
'''simple docstring'''
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
__a : Tuple = [
'l',
'o',
'w',
'e',
'r',
's',
't',
'i',
'd',
'n',
'\u0120',
'\u0120l',
'\u0120n',
'\u0120lo',
'\u0120low',
'er',
'\u0120lowest',
'\u0120newer',
'\u0120wider',
'<unk>',
'<|endoftext|>',
]
__a : Union[str, Any] = dict(zip(__a , range(len(__a ) ) ) )
__a : Tuple = ['#version: 0.2', '\u0120 l', '\u0120l o', '\u0120lo w', 'e r', '']
__a : Dict = {'unk_token': '<unk>'}
__a : List[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
__a : List[str] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp:
fp.write(json.dumps(__a ) + '\n' )
with open(self.merges_file , 'w' , encoding='utf-8' ) as fp:
fp.write('\n'.join(__a ) )
def __UpperCAmelCase ( self , **__a ):
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return CodeGenTokenizer.from_pretrained(self.tmpdirname , **__a )
def __UpperCAmelCase ( self , **__a ):
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return CodeGenTokenizerFast.from_pretrained(self.tmpdirname , **__a )
def __UpperCAmelCase ( self , __a ):
'''simple docstring'''
__a : Tuple = 'lower newer'
__a : Tuple = 'lower newer'
return input_text, output_text
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : List[Any] = CodeGenTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
__a : str = 'lower newer'
__a : Tuple = ['\u0120low', 'er', '\u0120', 'n', 'e', 'w', 'er']
__a : Dict = tokenizer.tokenize(__a , add_prefix_space=__a )
self.assertListEqual(__a , __a )
__a : List[str] = tokens + [tokenizer.unk_token]
__a : Any = [14, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(tokenizer.convert_tokens_to_ids(__a ) , __a )
def __UpperCAmelCase ( self ):
'''simple docstring'''
if not self.test_rust_tokenizer:
return
__a : List[Any] = self.get_tokenizer()
__a : List[str] = self.get_rust_tokenizer(add_prefix_space=__a )
__a : Any = 'lower newer'
# Testing tokenization
__a : Dict = tokenizer.tokenize(__a , add_prefix_space=__a )
__a : Dict = rust_tokenizer.tokenize(__a )
self.assertListEqual(__a , __a )
# Testing conversion to ids without special tokens
__a : int = tokenizer.encode(__a , add_special_tokens=__a , add_prefix_space=__a )
__a : Tuple = rust_tokenizer.encode(__a , add_special_tokens=__a )
self.assertListEqual(__a , __a )
# Testing conversion to ids with special tokens
__a : Tuple = self.get_rust_tokenizer(add_prefix_space=__a )
__a : Union[str, Any] = tokenizer.encode(__a , add_prefix_space=__a )
__a : int = rust_tokenizer.encode(__a )
self.assertListEqual(__a , __a )
# Testing the unknown token
__a : Any = tokens + [rust_tokenizer.unk_token]
__a : Tuple = [14, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(__a ) , __a )
def __UpperCAmelCase ( self , *__a , **__a ):
'''simple docstring'''
pass
def __UpperCAmelCase ( self , __a=15 ):
'''simple docstring'''
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
__a : Optional[int] = self.rust_tokenizer_class.from_pretrained(__a , **__a )
# Simple input
__a : List[Any] = 'This is a simple input'
__a : Tuple = ['This is a simple input 1', 'This is a simple input 2']
__a : Tuple = ('This is a simple input', 'This is a pair')
__a : str = [
('This is a simple input 1', 'This is a simple input 2'),
('This is a simple pair 1', 'This is a simple pair 2'),
]
# Simple input tests
self.assertRaises(__a , tokenizer_r.encode , __a , max_length=__a , padding='max_length' )
# Simple input
self.assertRaises(__a , tokenizer_r.encode_plus , __a , max_length=__a , padding='max_length' )
# Simple input
self.assertRaises(
__a , tokenizer_r.batch_encode_plus , __a , max_length=__a , padding='max_length' , )
# Pair input
self.assertRaises(__a , tokenizer_r.encode , __a , max_length=__a , padding='max_length' )
# Pair input
self.assertRaises(__a , tokenizer_r.encode_plus , __a , max_length=__a , padding='max_length' )
# Pair input
self.assertRaises(
__a , tokenizer_r.batch_encode_plus , __a , max_length=__a , padding='max_length' , )
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : List[Any] = CodeGenTokenizer.from_pretrained(self.tmpdirname , pad_token='<pad>' )
# Simple input
__a : str = 'This is a simple input'
__a : Any = ['This is a simple input looooooooong', 'This is a simple input']
__a : Optional[int] = ('This is a simple input', 'This is a pair')
__a : Optional[Any] = [
('This is a simple input loooooong', 'This is a simple input'),
('This is a simple pair loooooong', 'This is a simple pair'),
]
__a : int = tokenizer.pad_token_id
__a : List[Any] = tokenizer(__a , padding='max_length' , max_length=30 , return_tensors='np' )
__a : Union[str, Any] = tokenizer(__a , padding=__a , truncate=__a , return_tensors='np' )
__a : Optional[Any] = tokenizer(*__a , padding='max_length' , max_length=60 , return_tensors='np' )
__a : List[Any] = tokenizer(__a , padding=__a , truncate=__a , return_tensors='np' )
# s
# test single string max_length padding
self.assertEqual(out_s['input_ids'].shape[-1] , 30 )
self.assertTrue(pad_token_id in out_s['input_ids'] )
self.assertTrue(0 in out_s['attention_mask'] )
# s2
# test automatic padding
self.assertEqual(out_sa['input_ids'].shape[-1] , 33 )
# long slice doesn't have padding
self.assertFalse(pad_token_id in out_sa['input_ids'][0] )
self.assertFalse(0 in out_sa['attention_mask'][0] )
# short slice does have padding
self.assertTrue(pad_token_id in out_sa['input_ids'][1] )
self.assertTrue(0 in out_sa['attention_mask'][1] )
# p
# test single pair max_length padding
self.assertEqual(out_p['input_ids'].shape[-1] , 60 )
self.assertTrue(pad_token_id in out_p['input_ids'] )
self.assertTrue(0 in out_p['attention_mask'] )
# p2
# test automatic padding pair
self.assertEqual(out_pa['input_ids'].shape[-1] , 52 )
# long slice pair doesn't have padding
self.assertFalse(pad_token_id in out_pa['input_ids'][0] )
self.assertFalse(0 in out_pa['attention_mask'][0] )
# short slice pair does have padding
self.assertTrue(pad_token_id in out_pa['input_ids'][1] )
self.assertTrue(0 in out_pa['attention_mask'][1] )
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : Optional[int] = '$$$'
__a : List[str] = CodeGenTokenizer.from_pretrained(self.tmpdirname , bos_token=__a , add_bos_token=__a )
__a : Union[str, Any] = 'This is a simple input'
__a : List[Any] = ['This is a simple input 1', 'This is a simple input 2']
__a : List[Any] = tokenizer.bos_token_id
__a : List[str] = tokenizer(__a )
__a : Optional[Any] = tokenizer(__a )
self.assertEqual(out_s.input_ids[0] , __a )
self.assertTrue(all(o[0] == bos_token_id for o in out_sa.input_ids ) )
__a : Any = tokenizer.decode(out_s.input_ids )
__a : Union[str, Any] = tokenizer.batch_decode(out_sa.input_ids )
self.assertEqual(decode_s.split()[0] , __a )
self.assertTrue(all(d.split()[0] == bos_token for d in decode_sa ) )
@slow
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : Any = CodeGenTokenizer.from_pretrained('Salesforce/codegen-350M-mono' )
__a : Optional[int] = '\nif len_a > len_b:\n result = a\nelse:\n result = b\n\n\n\n#'
__a : Tuple = '\nif len_a > len_b: result = a\nelse: result = b'
__a : Optional[int] = tokenizer.encode(__a )
__a : Union[str, Any] = ['^#', re.escape('<|endoftext|>' ), '^\'\'\'', '^"""', '\n\n\n']
__a : Tuple = tokenizer.decode(__a , truncate_before_pattern=__a )
self.assertEqual(__a , __a )
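        # (truncate_before_pattern cuts the decoded text at the first match of any
        # given pattern, here a comment marker, the EOS token, triple quotes, or a
        # run of blank lines, which is how CodeGen completions are trimmed to a
        # single snippet.)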
def __UpperCAmelCase ( self ):
'''simple docstring'''
pass
| 294 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
A: int = {"configuration_deit": ["DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "DeiTConfig", "DeiTOnnxConfig"]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A: Optional[Any] = ["DeiTFeatureExtractor"]
A: Dict = ["DeiTImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A: Dict = [
"DEIT_PRETRAINED_MODEL_ARCHIVE_LIST",
"DeiTForImageClassification",
"DeiTForImageClassificationWithTeacher",
"DeiTForMaskedImageModeling",
"DeiTModel",
"DeiTPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A: Optional[int] = [
"TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFDeiTForImageClassification",
"TFDeiTForImageClassificationWithTeacher",
"TFDeiTForMaskedImageModeling",
"TFDeiTModel",
"TFDeiTPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_deit import DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, DeiTConfig, DeiTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_deit import DeiTFeatureExtractor
from .image_processing_deit import DeiTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_deit import (
DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
DeiTModel,
DeiTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_deit import (
TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
TFDeiTModel,
TFDeiTPreTrainedModel,
)
else:
import sys
A: Union[str, Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 109 |
"""simple docstring"""
import argparse
import torch
# Step 1. clone https://github.com/microsoft/unilm
# Step 2. git checkout to https://github.com/microsoft/unilm/commit/b94ec76c36f02fb2b0bf0dcb0b8554a2185173cd
# Step 3. cd unilm
# Step 4. ln -s $(realpath wavlm/modules.py) ./ # create symlink
# import classes
from unilm.wavlm.WavLM import WavLM as WavLMOrig
from unilm.wavlm.WavLM import WavLMConfig as WavLMConfigOrig
from transformers import WavLMConfig, WavLMModel, logging
logging.set_verbosity_info()
A: Optional[int] = logging.get_logger(__name__)
A: Tuple = {
"post_extract_proj": "feature_projection.projection",
"encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
"self_attn.k_proj": "encoder.layers.*.attention.k_proj",
"self_attn.v_proj": "encoder.layers.*.attention.v_proj",
"self_attn.q_proj": "encoder.layers.*.attention.q_proj",
"self_attn.out_proj": "encoder.layers.*.attention.out_proj",
"self_attn.grep_linear": "encoder.layers.*.attention.gru_rel_pos_linear",
"self_attn.relative_attention_bias": "encoder.layers.*.attention.rel_attn_embed",
"self_attn.grep_a": "encoder.layers.*.attention.gru_rel_pos_const",
"self_attn_layer_norm": "encoder.layers.*.layer_norm",
"fc1": "encoder.layers.*.feed_forward.intermediate_dense",
"fc2": "encoder.layers.*.feed_forward.output_dense",
"final_layer_norm": "encoder.layers.*.final_layer_norm",
"encoder.layer_norm": "encoder.layer_norm",
"w2v_model.layer_norm": "feature_projection.layer_norm",
"quantizer.weight_proj": "quantizer.weight_proj",
"quantizer.vars": "quantizer.codevectors",
"project_q": "project_q",
"final_proj": "project_hid",
"w2v_encoder.proj": "ctc_proj",
"mask_emb": "masked_spec_embed",
}
A: List[str] = [
"ctc_proj",
"quantizer.weight_proj",
"quantizer.codevectors",
"project_q",
"project_hid",
]
def _snake_case ( UpperCamelCase : str , UpperCamelCase : Tuple , UpperCamelCase : Optional[int] , UpperCamelCase : Tuple , UpperCamelCase : Any ):
for attribute in key.split(""".""" ):
UpperCAmelCase : Optional[Any] = getattr(UpperCamelCase , UpperCamelCase )
if weight_type is not None:
UpperCAmelCase : List[Any] = getattr(UpperCamelCase , UpperCamelCase ).shape
else:
UpperCAmelCase : str = hf_pointer.shape
assert hf_shape == value.shape, (
F"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
F" {value.shape} for {full_name}"
)
if weight_type == "weight":
UpperCAmelCase : Optional[Any] = value
elif weight_type == "weight_g":
UpperCAmelCase : str = value
elif weight_type == "weight_v":
UpperCAmelCase : Union[str, Any] = value
elif weight_type == "bias":
UpperCAmelCase : str = value
else:
UpperCAmelCase : Union[str, Any] = value
logger.info(F"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}." )
def _snake_case ( UpperCamelCase : List[Any] , UpperCamelCase : Optional[Any] ):
UpperCAmelCase : Tuple = []
UpperCAmelCase : Any = fairseq_model.state_dict()
UpperCAmelCase : Tuple = hf_model.feature_extractor
for name, value in fairseq_dict.items():
UpperCAmelCase : str = False
if "conv_layers" in name:
load_conv_layer(
UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , hf_model.config.feat_extract_norm == """group""" , )
UpperCAmelCase : Union[str, Any] = True
else:
for key, mapped_key in MAPPING.items():
if key in name or key.split("""w2v_model.""" )[-1] == name.split(""".""" )[0]:
UpperCAmelCase : Dict = True
if "*" in mapped_key:
UpperCAmelCase : str = name.split(UpperCamelCase )[0].split(""".""" )[-2]
UpperCAmelCase : Tuple = mapped_key.replace("""*""" , UpperCamelCase )
if "weight_g" in name:
UpperCAmelCase : Any = """weight_g"""
elif "weight_v" in name:
UpperCAmelCase : Optional[Any] = """weight_v"""
elif "bias" in name and "relative_attention_bias" not in name:
UpperCAmelCase : Union[str, Any] = """bias"""
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
UpperCAmelCase : str = """weight"""
else:
UpperCAmelCase : Optional[Any] = None
set_recursively(UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase )
continue
if not is_used:
unused_weights.append(UpperCamelCase )
logger.warning(F"Unused weights: {unused_weights}" )
def _snake_case ( UpperCamelCase : Optional[Any] , UpperCamelCase : Dict , UpperCamelCase : Tuple , UpperCamelCase : Any , UpperCamelCase : Any ):
UpperCAmelCase : str = full_name.split("""conv_layers.""" )[-1]
UpperCAmelCase : Dict = name.split(""".""" )
UpperCAmelCase : List[str] = int(items[0] )
UpperCAmelCase : Any = int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
F"{full_name} has size {value.shape}, but"
F" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
)
UpperCAmelCase : Optional[Any] = value
logger.info(F"Feat extract conv layer {layer_id} was initialized from {full_name}." )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
F"{full_name} has size {value.shape}, but"
F" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
)
UpperCAmelCase : Tuple = value
logger.info(F"Feat extract conv layer {layer_id} was initialized from {full_name}." )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
F"{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"
" found."
)
UpperCAmelCase : str = value
logger.info(F"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
F"{full_name} has size {value.shape}, but"
F" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."
)
UpperCAmelCase : Optional[Any] = value
logger.info(F"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." )
else:
unused_weights.append(UpperCamelCase )
@torch.no_grad()
def _snake_case ( UpperCamelCase : int , UpperCamelCase : List[Any] , UpperCamelCase : List[Any]=None ):
# load the pre-trained checkpoints
UpperCAmelCase : List[Any] = torch.load(UpperCamelCase )
UpperCAmelCase : List[str] = WavLMConfigOrig(checkpoint["""cfg"""] )
UpperCAmelCase : Optional[int] = WavLMOrig(UpperCamelCase )
model.load_state_dict(checkpoint["""model"""] )
model.eval()
if config_path is not None:
UpperCAmelCase : List[str] = WavLMConfig.from_pretrained(UpperCamelCase )
else:
UpperCAmelCase : List[Any] = WavLMConfig()
UpperCAmelCase : Any = WavLMModel(UpperCamelCase )
recursively_load_weights(UpperCamelCase , UpperCamelCase )
hf_wavlm.save_pretrained(UpperCamelCase )
if __name__ == "__main__":
A: int = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
A: Tuple = parser.parse_args()
convert_wavlm_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
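# Standalone sketch of the rename pattern driven by the MAPPING table above, with
# "*" standing for the layer index recovered from the fairseq key:
_old, _new = "self_attn.k_proj", "encoder.layers.*.attention.k_proj"
_name = "encoder.layers.3.self_attn.k_proj.weight"
_layer = _name.split(_old)[0].split(".")[-2]
assert _new.replace("*", _layer) == "encoder.layers.3.attention.k_proj"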
| 109 | 1 |
"""simple docstring"""
import math
import flax.linen as nn
import jax.numpy as jnp
def __UpperCAmelCase ( lowercase ,lowercase ,lowercase = 1 ,lowercase = 1 ,lowercase = 1.0E4 ,lowercase = False ,lowercase = 1.0 ,):
"""simple docstring"""
assert timesteps.ndim == 1, "Timesteps should be a 1d-array"
assert embedding_dim % 2 == 0, f'''Embedding dimension {embedding_dim} should be even'''
_UpperCAmelCase = float(embedding_dim // 2 )
_UpperCAmelCase = math.log(max_timescale / min_timescale ) / (num_timescales - freq_shift)
_UpperCAmelCase = min_timescale * jnp.exp(jnp.arange(_a ,dtype=jnp.floataa ) * -log_timescale_increment )
_UpperCAmelCase = jnp.expand_dims(_a ,1 ) * jnp.expand_dims(_a ,0 )
# scale embeddings
_UpperCAmelCase = scale * emb
if flip_sin_to_cos:
_UpperCAmelCase = jnp.concatenate([jnp.cos(_a ), jnp.sin(_a )] ,axis=1 )
else:
_UpperCAmelCase = jnp.concatenate([jnp.sin(_a ), jnp.cos(_a )] ,axis=1 )
_UpperCAmelCase = jnp.reshape(_a ,[jnp.shape(_a )[0], embedding_dim] )
return signal
class a ( nn.Module ):
_snake_case : Any = 32
_snake_case : int = jnp.floataa
@nn.compact
def __call__( self : Optional[Any] , __lowerCAmelCase : str ):
_UpperCAmelCase = nn.Dense(self.time_embed_dim , dtype=self.dtype , name="""linear_1""" )(lowerCamelCase_ )
_UpperCAmelCase = nn.silu(lowerCamelCase_ )
_UpperCAmelCase = nn.Dense(self.time_embed_dim , dtype=self.dtype , name="""linear_2""" )(lowerCamelCase_ )
return temb
class a ( nn.Module ):
_snake_case : Dict = 32
_snake_case : int = False
_snake_case : Any = 1
@nn.compact
def __call__( self : str , __lowerCAmelCase : List[str] ):
return get_sinusoidal_embeddings(
lowerCamelCase_ , embedding_dim=self.dim , flip_sin_to_cos=self.flip_sin_to_cos , freq_shift=self.freq_shift )
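# Minimal numpy sketch of the schedule computed above (standalone; numpy stands in
# for jax.numpy, and the scale/flip/freq-shift options are omitted):
import numpy as np

def _demo_sinusoidal(timesteps, dim, max_timescale=1e4):
    half = dim // 2
    freqs = np.exp(-np.log(max_timescale) * np.arange(half) / half)
    args = timesteps[:, None] * freqs[None, :]
    return np.concatenate([np.sin(args), np.cos(args)], axis=1)

assert _demo_sinusoidal(np.arange(4.0), 8).shape == (4, 8)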
| 360 | """simple docstring"""
import string
from math import log10


def term_frequency(term: str, document: str) -> int:
    """Count how many times `term` occurs in `document` (case-insensitive)."""
    document_without_punctuation = document.translate(
        str.maketrans("", "", string.punctuation)
    ).replace("\n", "")
    tokenize_document = document_without_punctuation.split(" ")  # word tokenization
    return len([word for word in tokenize_document if word.lower() == term.lower()])


def document_frequency(term: str, corpus: str) -> tuple[int, int]:
    """Return (number of documents containing `term`, total number of documents)."""
    corpus_without_punctuation = corpus.lower().translate(
        str.maketrans("", "", string.punctuation)
    )  # strip all punctuation and replace it with ''
    docs = corpus_without_punctuation.split("\n")
    term = term.lower()
    return (len([doc for doc in docs if term in doc]), len(docs))


def inverse_document_frequency(df: int, n: int, smoothing=False) -> float:
    """Compute idf = log10(n / df), optionally with add-one smoothing."""
    if smoothing:
        if n == 0:
            raise ValueError("log10(0) is undefined.")
        return round(1 + log10(n / (1 + df)), 3)
    if df == 0:
        raise ZeroDivisionError("df must be > 0")
    elif n == 0:
        raise ValueError("log10(0) is undefined.")
    return round(log10(n / df), 3)


def tf_idf(tf: int, idf: float) -> float:
    return round(tf * idf, 3)
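# A minimal usage sketch (the corpus below is a made-up example):
if __name__ == "__main__":
    example_corpus = "the cat sat\nthe dog ran\nthe cat ran"
    tf = term_frequency("cat", "the cat sat")  # -> 1
    df, n = document_frequency("cat", example_corpus)  # -> (2, 3)
    print(tf_idf(tf, inverse_document_frequency(df, n)))  # -> 0.176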
| 30 | 0 |
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path
import torch_xla.distributed.xla_multiprocessing as xmp
def parse_args():
    parser = ArgumentParser(
        description=(
            "PyTorch TPU distributed training launch "
            "helper utility that will spawn up "
            "multiple distributed processes"
        )
    )

    # Optional arguments for the launch helper
    parser.add_argument("--num_cores", type=int, default=1, help="Number of TPU cores to use (1 or 8).")

    # positional
    parser.add_argument(
        "training_script",
        type=str,
        help=(
            "The full path to the single TPU training "
            "program/script to be launched in parallel, "
            "followed by all the arguments for the "
            "training script"
        ),
    )

    # rest from the training program
    parser.add_argument("training_script_args", nargs=REMAINDER)
    return parser.parse_args()


def main():
    args = parse_args()

    # Import training_script as a module.
    script_fpath = Path(args.training_script)
    sys.path.append(str(script_fpath.parent.resolve()))
    mod_name = script_fpath.stem
    mod = importlib.import_module(mod_name)

    # Patch sys.argv so the training script sees its own arguments plus the core count.
    sys.argv = [args.training_script] + args.training_script_args + ["--tpu_num_cores", str(args.num_cores)]

    xmp.spawn(mod._mp_fn, args=(), nprocs=args.num_cores)
if __name__ == "__main__":
main()
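# A minimal invocation sketch (script name and flags are illustrative):
#   python xla_spawn.py --num_cores 8 run_glue.py --model_name_or_path bert-base-cased --do_train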
| 92 |
from __future__ import annotations
import math
def is_prime(number: int) -> bool:
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes are of the form 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


odd_composites = [num for num in range(3, 10_00_01, 2) if not is_prime(num)]


def compute_nums(n: int) -> list[int]:
    """Return the first `n` odd composites that cannot be written as a prime plus twice a square."""
    if not isinstance(n, int):
        raise ValueError("n must be an integer")
    if n <= 0:
        raise ValueError("n must be >= 0")

    list_nums = []
    for num in range(len(odd_composites)):
        i = 0
        while 2 * i * i <= odd_composites[num]:
            rem = odd_composites[num] - 2 * i * i
            if is_prime(rem):
                break
            i += 1
        else:
            list_nums.append(odd_composites[num])
            if len(list_nums) == n:
                return list_nums

    return []


def solution() -> int:
    """Return the smallest odd composite that disproves Goldbach's other conjecture."""
    return compute_nums(1)[0]
if __name__ == "__main__":
print(F"""{solution() = }""")
| 50 | 0 |
import json
import os
import tempfile
import datasets
from utils import generate_example_dataset, get_duration
SPEED_TEST_N_EXAMPLES = 50000
SMALL_TEST = 5000

RESULTS_BASEPATH, RESULTS_FILENAME = os.path.split(__file__)
RESULTS_FILE_PATH = os.path.join(RESULTS_BASEPATH, "results", RESULTS_FILENAME.replace(".py", ".json"))


@get_duration
def read(dataset, length):
    for i in range(length):
        _ = dataset[i]


@get_duration
def read_batch(dataset, length, batch_size):
    for i in range(0, len(dataset), batch_size):
        _ = dataset[i : i + batch_size]


@get_duration
def read_formatted(dataset, length, type):
    # `type` mirrors the kwargs key used in the benchmark tables below.
    with dataset.formatted_as(type=type):
        for i in range(length):
            _ = dataset[i]


@get_duration
def read_formatted_batch(dataset, length, batch_size, type):
    with dataset.formatted_as(type=type):
        for i in range(0, length, batch_size):
            _ = dataset[i : i + batch_size]


def benchmark_iterating():
    times = {"num examples": SPEED_TEST_N_EXAMPLES}
    functions = [
        (read, {"length": SMALL_TEST}),
        (read, {"length": SPEED_TEST_N_EXAMPLES}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 10}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 100}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 1_000}),
        (read_formatted, {"type": "numpy", "length": SMALL_TEST}),
        (read_formatted, {"type": "pandas", "length": SMALL_TEST}),
        (read_formatted, {"type": "torch", "length": SMALL_TEST}),
        (read_formatted, {"type": "tensorflow", "length": SMALL_TEST}),
        (read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 10}),
        (read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 1_000}),
    ]

    functions_shuffled = [
        (read, {"length": SMALL_TEST}),
        (read, {"length": SPEED_TEST_N_EXAMPLES}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 10}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 100}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 1_000}),
        (read_formatted, {"type": "numpy", "length": SMALL_TEST}),
        (read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 10}),
        (read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 1_000}),
    ]
    with tempfile.TemporaryDirectory() as tmp_dir:
        print("generating dataset")
        features = datasets.Features(
            {"list": datasets.Sequence(datasets.Value("float32")), "numbers": datasets.Value("float32")}
        )
        dataset = generate_example_dataset(
            os.path.join(tmp_dir, "dataset.arrow"),
            features,
            num_examples=SPEED_TEST_N_EXAMPLES,
            seq_shapes={"list": (100,)},
        )
        print("first set of iterations")
        for func, kwargs in functions:
            print(func.__name__, str(kwargs))
            times[func.__name__ + " " + " ".join(str(v) for v in kwargs.values())] = func(dataset, **kwargs)

        print("shuffling dataset")
        dataset = dataset.shuffle()
        print("Second set of iterations (after shuffling)")
        for func, kwargs in functions_shuffled:
            print("shuffled ", func.__name__, str(kwargs))
            times["shuffled " + func.__name__ + " " + " ".join(str(v) for v in kwargs.values())] = func(
                dataset, **kwargs
            )

    with open(RESULTS_FILE_PATH, "wb") as f:
        f.write(json.dumps(times).encode("utf-8"))
if __name__ == "__main__": # useful to run the profiler
    benchmark_iterating()
| 356 |
print((lambda quine: quine % quine)("""print((lambda quine: quine %% quine)(%r))"""))
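# Note: the printed string is a fixed point of the program above -- running the
# output as Python prints the exact same line again (a classic %r-based quine).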
| 285 | 0 |
import torch
from transformers import CamembertForMaskedLM, CamembertTokenizer
def fill_mask(masked_input, model, tokenizer, topk=5):
    assert masked_input.count("<mask>") == 1
    input_ids = torch.tensor(tokenizer.encode(masked_input, add_special_tokens=True)).unsqueeze(0)  # Batch size 1
    logits = model(input_ids)[0]  # The last hidden-state is the first element of the output tuple
    masked_index = (input_ids.squeeze() == tokenizer.mask_token_id).nonzero().item()
    logits = logits[0, masked_index, :]
    prob = logits.softmax(dim=0)
    values, indices = prob.topk(k=topk, dim=0)
    topk_predicted_token_bpe = " ".join(
        [tokenizer.convert_ids_to_tokens(indices[i].item()) for i in range(len(indices))]
    )
    masked_token = tokenizer.mask_token
    topk_filled_outputs = []
    for index, predicted_token_bpe in enumerate(topk_predicted_token_bpe.split(" ")):
        predicted_token = predicted_token_bpe.replace("\u2581", " ")
        if " {0}".format(masked_token) in masked_input:
            topk_filled_outputs.append(
                (
                    masked_input.replace(" {0}".format(masked_token), predicted_token),
                    values[index].item(),
                    predicted_token,
                )
            )
        else:
            topk_filled_outputs.append(
                (
                    masked_input.replace(masked_token, predicted_token),
                    values[index].item(),
                    predicted_token,
                )
            )
    return topk_filled_outputs
tokenizer = CamembertTokenizer.from_pretrained("camembert-base")
model = CamembertForMaskedLM.from_pretrained("camembert-base")
model.eval()

masked_input = "Le camembert est <mask> :)"
print(fill_mask(masked_input, model, tokenizer, topk=3))
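# Expected output: a list of (filled_sentence, probability, token) triples for
# the top-3 predictions. Exact tokens and scores depend on the released weights;
# the top completion is typically along the lines of
# ("Le camembert est délicieux :)", ~0.49, "délicieux").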
| 296 |
from collections import OrderedDict
from ...utils import logging
from .auto_factory import _BaseAutoModelClass, _LazyAutoMapping, auto_class_update
from .configuration_auto import CONFIG_MAPPING_NAMES
logger = logging.get_logger(__name__)
FLAX_MODEL_MAPPING_NAMES = OrderedDict(
[
# Base model mapping
("""albert""", """FlaxAlbertModel"""),
("""bart""", """FlaxBartModel"""),
("""beit""", """FlaxBeitModel"""),
("""bert""", """FlaxBertModel"""),
("""big_bird""", """FlaxBigBirdModel"""),
("""blenderbot""", """FlaxBlenderbotModel"""),
("""blenderbot-small""", """FlaxBlenderbotSmallModel"""),
("""clip""", """FlaxCLIPModel"""),
("""distilbert""", """FlaxDistilBertModel"""),
("""electra""", """FlaxElectraModel"""),
("""gpt-sw3""", """FlaxGPT2Model"""),
("""gpt2""", """FlaxGPT2Model"""),
("""gpt_neo""", """FlaxGPTNeoModel"""),
("""gptj""", """FlaxGPTJModel"""),
("""longt5""", """FlaxLongT5Model"""),
("""marian""", """FlaxMarianModel"""),
("""mbart""", """FlaxMBartModel"""),
("""mt5""", """FlaxMT5Model"""),
("""opt""", """FlaxOPTModel"""),
("""pegasus""", """FlaxPegasusModel"""),
("""regnet""", """FlaxRegNetModel"""),
("""resnet""", """FlaxResNetModel"""),
("""roberta""", """FlaxRobertaModel"""),
("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormModel"""),
("""roformer""", """FlaxRoFormerModel"""),
("""t5""", """FlaxT5Model"""),
("""vision-text-dual-encoder""", """FlaxVisionTextDualEncoderModel"""),
("""vit""", """FlaxViTModel"""),
("""wav2vec2""", """FlaxWav2Vec2Model"""),
("""whisper""", """FlaxWhisperModel"""),
("""xglm""", """FlaxXGLMModel"""),
("""xlm-roberta""", """FlaxXLMRobertaModel"""),
]
)
FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES = OrderedDict(
[
# Model for pre-training mapping
("""albert""", """FlaxAlbertForPreTraining"""),
("""bart""", """FlaxBartForConditionalGeneration"""),
("""bert""", """FlaxBertForPreTraining"""),
("""big_bird""", """FlaxBigBirdForPreTraining"""),
("""electra""", """FlaxElectraForPreTraining"""),
("""longt5""", """FlaxLongT5ForConditionalGeneration"""),
("""mbart""", """FlaxMBartForConditionalGeneration"""),
("""mt5""", """FlaxMT5ForConditionalGeneration"""),
("""roberta""", """FlaxRobertaForMaskedLM"""),
("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForMaskedLM"""),
("""roformer""", """FlaxRoFormerForMaskedLM"""),
("""t5""", """FlaxT5ForConditionalGeneration"""),
("""wav2vec2""", """FlaxWav2Vec2ForPreTraining"""),
("""whisper""", """FlaxWhisperForConditionalGeneration"""),
("""xlm-roberta""", """FlaxXLMRobertaForMaskedLM"""),
]
)
FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Masked LM mapping
("""albert""", """FlaxAlbertForMaskedLM"""),
("""bart""", """FlaxBartForConditionalGeneration"""),
("""bert""", """FlaxBertForMaskedLM"""),
("""big_bird""", """FlaxBigBirdForMaskedLM"""),
("""distilbert""", """FlaxDistilBertForMaskedLM"""),
("""electra""", """FlaxElectraForMaskedLM"""),
("""mbart""", """FlaxMBartForConditionalGeneration"""),
("""roberta""", """FlaxRobertaForMaskedLM"""),
("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForMaskedLM"""),
("""roformer""", """FlaxRoFormerForMaskedLM"""),
("""xlm-roberta""", """FlaxXLMRobertaForMaskedLM"""),
]
)
FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Seq2Seq Causal LM mapping
("""bart""", """FlaxBartForConditionalGeneration"""),
("""blenderbot""", """FlaxBlenderbotForConditionalGeneration"""),
("""blenderbot-small""", """FlaxBlenderbotSmallForConditionalGeneration"""),
("""encoder-decoder""", """FlaxEncoderDecoderModel"""),
("""longt5""", """FlaxLongT5ForConditionalGeneration"""),
("""marian""", """FlaxMarianMTModel"""),
("""mbart""", """FlaxMBartForConditionalGeneration"""),
("""mt5""", """FlaxMT5ForConditionalGeneration"""),
("""pegasus""", """FlaxPegasusForConditionalGeneration"""),
("""t5""", """FlaxT5ForConditionalGeneration"""),
]
)
FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
    [
        # Model for Image-classification
("""beit""", """FlaxBeitForImageClassification"""),
("""regnet""", """FlaxRegNetForImageClassification"""),
("""resnet""", """FlaxResNetForImageClassification"""),
("""vit""", """FlaxViTForImageClassification"""),
]
)
FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES = OrderedDict(
[
("""vision-encoder-decoder""", """FlaxVisionEncoderDecoderModel"""),
]
)
FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Causal LM mapping
("""bart""", """FlaxBartForCausalLM"""),
("""bert""", """FlaxBertForCausalLM"""),
("""big_bird""", """FlaxBigBirdForCausalLM"""),
("""electra""", """FlaxElectraForCausalLM"""),
("""gpt-sw3""", """FlaxGPT2LMHeadModel"""),
("""gpt2""", """FlaxGPT2LMHeadModel"""),
("""gpt_neo""", """FlaxGPTNeoForCausalLM"""),
("""gptj""", """FlaxGPTJForCausalLM"""),
("""opt""", """FlaxOPTForCausalLM"""),
("""roberta""", """FlaxRobertaForCausalLM"""),
("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForCausalLM"""),
("""xglm""", """FlaxXGLMForCausalLM"""),
("""xlm-roberta""", """FlaxXLMRobertaForCausalLM"""),
]
)
FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
# Model for Sequence Classification mapping
("""albert""", """FlaxAlbertForSequenceClassification"""),
("""bart""", """FlaxBartForSequenceClassification"""),
("""bert""", """FlaxBertForSequenceClassification"""),
("""big_bird""", """FlaxBigBirdForSequenceClassification"""),
("""distilbert""", """FlaxDistilBertForSequenceClassification"""),
("""electra""", """FlaxElectraForSequenceClassification"""),
("""mbart""", """FlaxMBartForSequenceClassification"""),
("""roberta""", """FlaxRobertaForSequenceClassification"""),
("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForSequenceClassification"""),
("""roformer""", """FlaxRoFormerForSequenceClassification"""),
("""xlm-roberta""", """FlaxXLMRobertaForSequenceClassification"""),
]
)
FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES = OrderedDict(
[
# Model for Question Answering mapping
("""albert""", """FlaxAlbertForQuestionAnswering"""),
("""bart""", """FlaxBartForQuestionAnswering"""),
("""bert""", """FlaxBertForQuestionAnswering"""),
("""big_bird""", """FlaxBigBirdForQuestionAnswering"""),
("""distilbert""", """FlaxDistilBertForQuestionAnswering"""),
("""electra""", """FlaxElectraForQuestionAnswering"""),
("""mbart""", """FlaxMBartForQuestionAnswering"""),
("""roberta""", """FlaxRobertaForQuestionAnswering"""),
("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForQuestionAnswering"""),
("""roformer""", """FlaxRoFormerForQuestionAnswering"""),
("""xlm-roberta""", """FlaxXLMRobertaForQuestionAnswering"""),
]
)
FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
# Model for Token Classification mapping
("""albert""", """FlaxAlbertForTokenClassification"""),
("""bert""", """FlaxBertForTokenClassification"""),
("""big_bird""", """FlaxBigBirdForTokenClassification"""),
("""distilbert""", """FlaxDistilBertForTokenClassification"""),
("""electra""", """FlaxElectraForTokenClassification"""),
("""roberta""", """FlaxRobertaForTokenClassification"""),
("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForTokenClassification"""),
("""roformer""", """FlaxRoFormerForTokenClassification"""),
("""xlm-roberta""", """FlaxXLMRobertaForTokenClassification"""),
]
)
FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES = OrderedDict(
[
# Model for Multiple Choice mapping
("""albert""", """FlaxAlbertForMultipleChoice"""),
("""bert""", """FlaxBertForMultipleChoice"""),
("""big_bird""", """FlaxBigBirdForMultipleChoice"""),
("""distilbert""", """FlaxDistilBertForMultipleChoice"""),
("""electra""", """FlaxElectraForMultipleChoice"""),
("""roberta""", """FlaxRobertaForMultipleChoice"""),
("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForMultipleChoice"""),
("""roformer""", """FlaxRoFormerForMultipleChoice"""),
("""xlm-roberta""", """FlaxXLMRobertaForMultipleChoice"""),
]
)
FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES = OrderedDict(
[
("""bert""", """FlaxBertForNextSentencePrediction"""),
]
)
FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES = OrderedDict(
[
("""speech-encoder-decoder""", """FlaxSpeechEncoderDecoderModel"""),
("""whisper""", """FlaxWhisperForConditionalGeneration"""),
]
)
FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
("""whisper""", """FlaxWhisperForAudioClassification"""),
]
)
FLAX_MODEL_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_MAPPING_NAMES)
FLAX_MODEL_FOR_PRETRAINING_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES)
FLAX_MODEL_FOR_MASKED_LM_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES)
FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES
)
FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES)
FLAX_MODEL_FOR_CAUSAL_LM_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES)
FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES
)
FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES
)
FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES
)
FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES
)
FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES
)
class FlaxAutoModel(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_MAPPING


FlaxAutoModel = auto_class_update(FlaxAutoModel)


class FlaxAutoModelForPreTraining(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_PRETRAINING_MAPPING


FlaxAutoModelForPreTraining = auto_class_update(FlaxAutoModelForPreTraining, head_doc="pretraining")


class FlaxAutoModelForCausalLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_CAUSAL_LM_MAPPING


FlaxAutoModelForCausalLM = auto_class_update(FlaxAutoModelForCausalLM, head_doc="causal language modeling")


class FlaxAutoModelForMaskedLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_MASKED_LM_MAPPING


FlaxAutoModelForMaskedLM = auto_class_update(FlaxAutoModelForMaskedLM, head_doc="masked language modeling")


class FlaxAutoModelForSeq2SeqLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING


FlaxAutoModelForSeq2SeqLM = auto_class_update(
    FlaxAutoModelForSeq2SeqLM, head_doc="sequence-to-sequence language modeling", checkpoint_for_example="t5-base"
)


class FlaxAutoModelForSequenceClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING


FlaxAutoModelForSequenceClassification = auto_class_update(
    FlaxAutoModelForSequenceClassification, head_doc="sequence classification"
)


class FlaxAutoModelForQuestionAnswering(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING


FlaxAutoModelForQuestionAnswering = auto_class_update(FlaxAutoModelForQuestionAnswering, head_doc="question answering")


class FlaxAutoModelForTokenClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING


FlaxAutoModelForTokenClassification = auto_class_update(
    FlaxAutoModelForTokenClassification, head_doc="token classification"
)


class FlaxAutoModelForMultipleChoice(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING


FlaxAutoModelForMultipleChoice = auto_class_update(FlaxAutoModelForMultipleChoice, head_doc="multiple choice")


class FlaxAutoModelForNextSentencePrediction(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING


FlaxAutoModelForNextSentencePrediction = auto_class_update(
    FlaxAutoModelForNextSentencePrediction, head_doc="next sentence prediction"
)


class FlaxAutoModelForImageClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING


FlaxAutoModelForImageClassification = auto_class_update(
    FlaxAutoModelForImageClassification, head_doc="image classification"
)


class FlaxAutoModelForVision2Seq(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING


FlaxAutoModelForVision2Seq = auto_class_update(FlaxAutoModelForVision2Seq, head_doc="vision-to-text modeling")


class FlaxAutoModelForSpeechSeq2Seq(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING


FlaxAutoModelForSpeechSeq2Seq = auto_class_update(
    FlaxAutoModelForSpeechSeq2Seq, head_doc="sequence-to-sequence speech-to-text modeling"
)
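# A minimal usage sketch for the auto classes above (checkpoint name is
# illustrative; any checkpoint with a Flax implementation works the same way):
#   from transformers import AutoTokenizer
#   tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
#   model = FlaxAutoModel.from_pretrained("bert-base-cased")
#   outputs = model(**tokenizer("Hello world!", return_tensors="np"))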
| 296 | 1 |
import argparse
import pickle
import numpy as np
import torch
from torch import nn
from transformers import ReformerConfig, ReformerModelWithLMHead
from transformers.utils import logging
logging.set_verbosity_info()
def set_param(torch_layer, weight, bias=None):
    # set parameter of one layer
    assert torch_layer.weight.shape == weight.shape, f"{torch_layer} layer.weight does not match"
    torch_layer.weight = nn.Parameter(weight)
    if bias is not None:
        assert torch_layer.bias.shape == bias.shape, f"{torch_layer} layer.bias does not match"
        torch_layer.bias = nn.Parameter(bias)


def set_layer_weights_in_torch_lsh(weights, torch_layer, hidden_size):
    # set torch weights for 1-to-1 comparison
    np_query_key = np.asarray(weights[0])
    np_value = np.asarray(weights[1])
    np_dense = np.asarray(weights[2])

    set_param(
        torch_layer.self_attention.query_key,
        torch.tensor(np_query_key).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.self_attention.value,
        torch.tensor(np_value).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.output.dense,
        torch.tensor(np_dense).view(-1, hidden_size).contiguous().transpose(0, 1),
    )


def set_layer_weights_in_torch_local(weights, torch_layer, hidden_size):
    # set torch weights for 1-to-1 comparison
    np_query = np.asarray(weights[0])
    np_key = np.asarray(weights[1])
    np_value = np.asarray(weights[2])
    np_dense = np.asarray(weights[3])

    set_param(
        torch_layer.self_attention.query,
        torch.tensor(np_query).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.self_attention.key,
        torch.tensor(np_key).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.self_attention.value,
        torch.tensor(np_value).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.output.dense,
        torch.tensor(np_dense).view(-1, hidden_size).contiguous().transpose(0, 1),
    )


def set_block_weights_in_torch(weights, torch_block, hidden_size):
    # layernorm 1
    layer_norm_1 = weights[0][0][0]
    layer_norm_1_weight = np.asarray(layer_norm_1[0])
    layer_norm_1_bias = np.asarray(layer_norm_1[1])
    set_param(
        torch_block.attention.layer_norm,
        torch.tensor(layer_norm_1_weight),
        torch.tensor(layer_norm_1_bias),
    )

    # lsh weights + output
    attn_weights = weights[0][1]
    if len(attn_weights) < 4:
        set_layer_weights_in_torch_lsh(attn_weights, torch_block.attention, hidden_size)
    else:
        set_layer_weights_in_torch_local(attn_weights, torch_block.attention, hidden_size)

    # intermediate weighs
    intermediate_weights = weights[2][0][1][2]

    # Chunked Feed Forward
    if len(intermediate_weights) == 4:
        intermediate_weights = intermediate_weights[2]

    # layernorm 2
    layer_norm_2_weight = np.asarray(intermediate_weights[0][0])
    layer_norm_2_bias = np.asarray(intermediate_weights[0][1])
    set_param(
        torch_block.feed_forward.layer_norm,
        torch.tensor(layer_norm_2_weight),
        torch.tensor(layer_norm_2_bias),
    )

    # intermediate dense
    inter_dense_weight = np.asarray(intermediate_weights[1][0])
    inter_dense_bias = np.asarray(intermediate_weights[1][1])
    set_param(
        torch_block.feed_forward.dense.dense,
        torch.tensor(inter_dense_weight).transpose(0, 1).contiguous(),
        torch.tensor(inter_dense_bias),
    )

    # intermediate out
    out_dense_weight = np.asarray(intermediate_weights[4][0])
    out_dense_bias = np.asarray(intermediate_weights[4][1])
    set_param(
        torch_block.feed_forward.output.dense,
        torch.tensor(out_dense_weight).transpose(0, 1).contiguous(),
        torch.tensor(out_dense_bias),
    )


def set_model_weights_in_torch(weights, torch_model, hidden_size):
    # reformer model
    torch_model_reformer = torch_model.reformer

    # word embeds
    word_embeddings = np.asarray(weights[1])
    set_param(
        torch_model_reformer.embeddings.word_embeddings,
        torch.tensor(word_embeddings),
    )

    if isinstance(weights[3], tuple):
        position_embeddings = torch_model_reformer.embeddings.position_embeddings
        for emb_idx in range(len(position_embeddings.weights)):
            emb_weights = np.asarray(weights[3][emb_idx][0])
            assert (
                position_embeddings.weights[emb_idx].shape == emb_weights.shape
            ), f"{position_embeddings[emb_idx]} emb does not match"
            position_embeddings.weights[emb_idx] = nn.Parameter(torch.tensor(emb_weights))

    trax_layer_weights = weights[5]
    assert len(torch_model_reformer.encoder.layers) * 4 == len(
        trax_layer_weights
    ), "HF and trax model do not have the same number of layers"
    for layer_idx, layer in enumerate(torch_model_reformer.encoder.layers):
        block_weights = trax_layer_weights[4 * layer_idx : 4 * (layer_idx + 1)]
        set_block_weights_in_torch(block_weights, layer, hidden_size)

    # output layer norm
    layer_norm_out_weight = np.asarray(weights[7][0])
    layer_norm_out_bias = np.asarray(weights[7][1])
    set_param(
        torch_model_reformer.encoder.layer_norm,
        torch.tensor(layer_norm_out_weight),
        torch.tensor(layer_norm_out_bias),
    )

    # output embeddings
    output_embed_weights = np.asarray(weights[9][0])
    output_embed_bias = np.asarray(weights[9][1])
    set_param(
        torch_model.lm_head.decoder,
        torch.tensor(output_embed_weights).transpose(0, 1).contiguous(),
        torch.tensor(output_embed_bias),
    )


def convert_trax_checkpoint_to_pytorch(trax_model_pkl_path, config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = ReformerConfig.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = ReformerModelWithLMHead(config)

    with open(trax_model_pkl_path, "rb") as f:
        model_weights = pickle.load(f)["weights"]

    set_model_weights_in_torch(model_weights, model, config.hidden_size)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--trax_model_pkl_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--config_file",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained Reformer model. \n"
"This specifies the model architecture."
),
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
    args = parser.parse_args()
convert_trax_checkpoint_to_pytorch(args.trax_model_pkl_path, args.config_file, args.pytorch_dump_path)
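    # A minimal invocation sketch (all paths are hypothetical placeholders):
    #   python convert_reformer_trax_checkpoint_to_pytorch.py \
    #       --trax_model_pkl_path ./model.pkl \
    #       --config_file ./config.json \
    #       --pytorch_dump_path ./pytorch_model.bin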
| 339 |
import collections
import gzip
import os
import urllib.request
import numpy
from tensorflow.python.framework import dtypes, random_seed
from tensorflow.python.platform import gfile
from tensorflow.python.util.deprecation import deprecated
_Datasets = collections.namedtuple("_Datasets", ["train", "validation", "test"])

# CVDF mirror of http://yann.lecun.com/exdb/mnist/
DEFAULT_SOURCE_URL = "https://storage.googleapis.com/cvdf-datasets/mnist/"


def _read32(bytestream):
    dt = numpy.dtype(numpy.uint32).newbyteorder(">")
    return numpy.frombuffer(bytestream.read(4), dtype=dt)[0]


@deprecated(None, "Please use tf.data to implement this functionality.")
def _extract_images(f):
    """Extract the images into a 4D uint8 numpy array [index, y, x, depth]."""
    print("Extracting", f.name)
    with gzip.GzipFile(fileobj=f) as bytestream:
        magic = _read32(bytestream)
        if magic != 2051:
            raise ValueError("Invalid magic number %d in MNIST image file: %s" % (magic, f.name))
        num_images = _read32(bytestream)
        rows = _read32(bytestream)
        cols = _read32(bytestream)
        buf = bytestream.read(rows * cols * num_images)
        data = numpy.frombuffer(buf, dtype=numpy.uint8)
        data = data.reshape(num_images, rows, cols, 1)
        return data


@deprecated(None, "Please use tf.one_hot on tensors.")
def _dense_to_one_hot(labels_dense, num_classes):
    """Convert class labels from scalars to one-hot vectors."""
    num_labels = labels_dense.shape[0]
    index_offset = numpy.arange(num_labels) * num_classes
    labels_one_hot = numpy.zeros((num_labels, num_classes))
    labels_one_hot.flat[index_offset + labels_dense.ravel()] = 1
    return labels_one_hot


@deprecated(None, "Please use tf.data to implement this functionality.")
def _extract_labels(f, one_hot=False, num_classes=10):
    """Extract the labels into a 1D uint8 numpy array [index]."""
    print("Extracting", f.name)
    with gzip.GzipFile(fileobj=f) as bytestream:
        magic = _read32(bytestream)
        if magic != 2049:
            raise ValueError("Invalid magic number %d in MNIST label file: %s" % (magic, f.name))
        num_items = _read32(bytestream)
        buf = bytestream.read(num_items)
        labels = numpy.frombuffer(buf, dtype=numpy.uint8)
        if one_hot:
            return _dense_to_one_hot(labels, num_classes)
        return labels


class _DataSet:
    @deprecated(
        None,
        "Please use alternatives such as official/mnist/_DataSet.py"
        " from tensorflow/models.",
    )
    def __init__(
        self,
        images,
        labels,
        fake_data=False,
        one_hot=False,
        dtype=dtypes.float32,
        reshape=True,
        seed=None,
    ):
        """Construct a _DataSet. `dtype` can be uint8 (raw pixels) or float32 (rescaled to [0, 1])."""
        seed1, seed2 = random_seed.get_seed(seed)
        # If op level seed is not set, use whatever graph level seed is returned
        numpy.random.seed(seed1 if seed is None else seed2)
        dtype = dtypes.as_dtype(dtype).base_dtype
        if dtype not in (dtypes.uint8, dtypes.float32):
            raise TypeError("Invalid image dtype %r, expected uint8 or float32" % dtype)
        if fake_data:
            self._num_examples = 10000
            self.one_hot = one_hot
        else:
            assert (
                images.shape[0] == labels.shape[0]
            ), f"images.shape: {images.shape} labels.shape: {labels.shape}"
            self._num_examples = images.shape[0]

            # Convert shape from [num examples, rows, columns, depth]
            # to [num examples, rows*columns] (assuming depth == 1)
            if reshape:
                assert images.shape[3] == 1
                images = images.reshape(images.shape[0], images.shape[1] * images.shape[2])
            if dtype == dtypes.float32:
                # Convert from [0, 255] -> [0.0, 1.0].
                images = images.astype(numpy.float32)
                images = numpy.multiply(images, 1.0 / 255.0)
        self._images = images
        self._labels = labels
        self._epochs_completed = 0
        self._index_in_epoch = 0

    @property
    def images(self):
        return self._images

    @property
    def labels(self):
        return self._labels

    @property
    def num_examples(self):
        return self._num_examples

    @property
    def epochs_completed(self):
        return self._epochs_completed

    def next_batch(self, batch_size, fake_data=False, shuffle=True):
        """Return the next `batch_size` examples from this data set."""
        if fake_data:
            fake_image = [1] * 784
            fake_label = [1] + [0] * 9 if self.one_hot else 0
            return (
                [fake_image for _ in range(batch_size)],
                [fake_label for _ in range(batch_size)],
            )
        start = self._index_in_epoch
        # Shuffle for the first epoch
        if self._epochs_completed == 0 and start == 0 and shuffle:
            perm0 = numpy.arange(self._num_examples)
            numpy.random.shuffle(perm0)
            self._images = self.images[perm0]
            self._labels = self.labels[perm0]
        # Go to the next epoch
        if start + batch_size > self._num_examples:
            # Finished epoch
            self._epochs_completed += 1
            # Get the rest examples in this epoch
            rest_num_examples = self._num_examples - start
            images_rest_part = self._images[start : self._num_examples]
            labels_rest_part = self._labels[start : self._num_examples]
            # Shuffle the data
            if shuffle:
                perm = numpy.arange(self._num_examples)
                numpy.random.shuffle(perm)
                self._images = self.images[perm]
                self._labels = self.labels[perm]
            # Start next epoch
            start = 0
            self._index_in_epoch = batch_size - rest_num_examples
            end = self._index_in_epoch
            images_new_part = self._images[start:end]
            labels_new_part = self._labels[start:end]
            return (
                numpy.concatenate((images_rest_part, images_new_part), axis=0),
                numpy.concatenate((labels_rest_part, labels_new_part), axis=0),
            )
        else:
            self._index_in_epoch += batch_size
            end = self._index_in_epoch
            return self._images[start:end], self._labels[start:end]


@deprecated(None, "Please write your own downloading logic.")
def _maybe_download(filename, work_directory, source_url):
    """Download `filename` from `source_url` into `work_directory`, unless it is already there."""
    if not gfile.Exists(work_directory):
        gfile.MakeDirs(work_directory)
    filepath = os.path.join(work_directory, filename)
    if not gfile.Exists(filepath):
        urllib.request.urlretrieve(source_url, filepath)  # noqa: S310
        with gfile.GFile(filepath) as f:
            size = f.size()
        print("Successfully downloaded", filename, size, "bytes.")
    return filepath


@deprecated(None, "Please use alternatives such as: tensorflow_datasets.load('mnist')")
def read_data_sets(
    train_dir,
    fake_data=False,
    one_hot=False,
    dtype=dtypes.float32,
    reshape=True,
    validation_size=5000,
    seed=None,
    source_url=DEFAULT_SOURCE_URL,
):
    if fake_data:

        def fake():
            return _DataSet([], [], fake_data=True, one_hot=one_hot, dtype=dtype, seed=seed)

        train = fake()
        validation = fake()
        test = fake()
        return _Datasets(train=train, validation=validation, test=test)

    if not source_url:  # empty string check
        source_url = DEFAULT_SOURCE_URL

    train_images_file = "train-images-idx3-ubyte.gz"
    train_labels_file = "train-labels-idx1-ubyte.gz"
    test_images_file = "t10k-images-idx3-ubyte.gz"
    test_labels_file = "t10k-labels-idx1-ubyte.gz"

    local_file = _maybe_download(train_images_file, train_dir, source_url + train_images_file)
    with gfile.Open(local_file, "rb") as f:
        train_images = _extract_images(f)

    local_file = _maybe_download(train_labels_file, train_dir, source_url + train_labels_file)
    with gfile.Open(local_file, "rb") as f:
        train_labels = _extract_labels(f, one_hot=one_hot)

    local_file = _maybe_download(test_images_file, train_dir, source_url + test_images_file)
    with gfile.Open(local_file, "rb") as f:
        test_images = _extract_images(f)

    local_file = _maybe_download(test_labels_file, train_dir, source_url + test_labels_file)
    with gfile.Open(local_file, "rb") as f:
        test_labels = _extract_labels(f, one_hot=one_hot)

    if not 0 <= validation_size <= len(train_images):
        msg = (
            "Validation size should be between 0 and "
            f"{len(train_images)}. Received: {validation_size}."
        )
        raise ValueError(msg)

    validation_images = train_images[:validation_size]
    validation_labels = train_labels[:validation_size]
    train_images = train_images[validation_size:]
    train_labels = train_labels[validation_size:]

    options = {"dtype": dtype, "reshape": reshape, "seed": seed}

    train = _DataSet(train_images, train_labels, **options)
    validation = _DataSet(validation_images, validation_labels, **options)
    test = _DataSet(test_images, test_labels, **options)

    return _Datasets(train=train, validation=validation, test=test)
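# A minimal usage sketch (the data directory is a hypothetical local cache path):
#   mnist = read_data_sets("/tmp/mnist_data", one_hot=True)
#   images, labels = mnist.train.next_batch(64)  # images: (64, 784), labels: (64, 10)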
| 339 | 1 |
"""simple docstring"""
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.activations import gelu_new, gelu_python, get_activation
@require_torch
class TestActivations(unittest.TestCase):
    def test_gelu_versions(self):
        x = torch.tensor([-100, -1, -0.1, 0, 0.1, 1.0, 100])
        torch_builtin = get_activation("gelu")
        self.assertTrue(torch.allclose(gelu_python(x), torch_builtin(x)))
        self.assertFalse(torch.allclose(gelu_python(x), gelu_new(x)))

    def test_gelu_10(self):
        x = torch.tensor([-100, -1, -0.1, 0, 0.1, 1.0, 100])
        torch_builtin = get_activation("gelu")
        gelu10 = get_activation("gelu_10")

        y_gelu = torch_builtin(x)
        y_gelu_10 = gelu10(x)

        clipped_mask = torch.where(y_gelu_10 < 10.0, 1, 0)

        self.assertTrue(torch.max(y_gelu_10).item() == 10.0)
        self.assertTrue(torch.allclose(y_gelu * clipped_mask, y_gelu_10 * clipped_mask))

    def test_get_activation(self):
        get_activation("gelu")
        get_activation("gelu_10")
        get_activation("gelu_fast")
        get_activation("gelu_new")
        get_activation("gelu_python")
        get_activation("gelu_pytorch_tanh")
        get_activation("linear")
        get_activation("mish")
        get_activation("quick_gelu")
        get_activation("relu")
        get_activation("sigmoid")
        get_activation("silu")
        get_activation("swish")
        get_activation("tanh")
        with self.assertRaises(KeyError):
            get_activation("bogus")
        with self.assertRaises(KeyError):
            get_activation(None)

    def test_activations_are_distinct_objects(self):
        act1 = get_activation("gelu")
        act1.a = 1
        act2 = get_activation("gelu")
        self.assertEqual(act1.a, 1)
        with self.assertRaises(AttributeError):
            _ = act2.a
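# Note on test_gelu_10 above: "gelu_10" is the standard GELU with its output
# clipped to [-10, 10], so it agrees with plain GELU wherever the activation
# stays below the clip value -- which is what the masked allclose check verifies.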
| 294 |
"""simple docstring"""
from __future__ import annotations
import time
import numpy as np
test_claim_vector = [8, 5, 9, 7]
test_allocated_res_table = [
[2, 0, 1, 1],
[0, 1, 2, 1],
[4, 0, 0, 3],
[0, 2, 1, 0],
[1, 0, 3, 0],
]
test_maximum_claim_table = [
[3, 2, 1, 4],
[0, 2, 5, 2],
[5, 1, 0, 5],
[1, 5, 3, 0],
[3, 0, 3, 3],
]
class BankersAlgorithm:
    def __init__(
        self,
        claim_vector: list[int],
        allocated_resources_table: list[list[int]],
        maximum_claim_table: list[list[int]],
    ) -> None:
        self.__claim_vector = claim_vector
        self.__allocated_resources_table = allocated_resources_table
        self.__maximum_claim_table = maximum_claim_table

    def __processes_resource_summation(self) -> list[int]:
        # Per-resource total currently allocated across all processes.
        return [
            sum(p_item[i] for p_item in self.__allocated_resources_table)
            for i in range(len(self.__allocated_resources_table[0]))
        ]

    def __available_resources(self) -> list[int]:
        return np.array(self.__claim_vector) - np.array(
            self.__processes_resource_summation()
        )

    def __need(self) -> list[list[int]]:
        # Remaining demand of each process: maximum claim minus current allocation.
        return [
            list(np.array(self.__maximum_claim_table[i]) - np.array(allocated_resource))
            for i, allocated_resource in enumerate(self.__allocated_resources_table)
        ]

    def __need_index_manager(self) -> dict[int, list[int]]:
        return {self.__need().index(i): i for i in self.__need()}

    def main(self, **kwargs) -> None:
        need_list = self.__need()
        alloc_resources_table = self.__allocated_resources_table
        available_resources = self.__available_resources()
        need_index_manager = self.__need_index_manager()
        for kw, val in kwargs.items():
            if kw and val is True:
                self.__pretty_data()
        print("_" * 50 + "\n")
        while need_list:
            safe = False
            for each_need in need_list:
                execution = True
                for index, need in enumerate(each_need):
                    if need > available_resources[index]:
                        execution = False
                        break
                if execution:
                    safe = True
                    # get the original index of the process from ind_ctrl db
                    for original_need_index, need_clone in need_index_manager.items():
                        if each_need == need_clone:
                            process_number = original_need_index
                    print(f"Process {process_number + 1} is executing.")
                    # remove the process run from stack
                    need_list.remove(each_need)
                    # update available/freed resources stack
                    available_resources = np.array(available_resources) + np.array(
                        alloc_resources_table[process_number]
                    )
                    print(
                        "Updated available resource stack for processes: "
                        + " ".join([str(x) for x in available_resources])
                    )
                    break
            if safe:
                print("The process is in a safe state.\n")
            else:
                print("System in unsafe state. Aborting...\n")
                break

    def __pretty_data(self):
        print(" " * 9 + "Allocated Resource Table")
        for item in self.__allocated_resources_table:
            print(
                f"P{self.__allocated_resources_table.index(item) + 1}"
                + " ".join(f"{it:>8}" for it in item)
                + "\n"
            )
        print(" " * 9 + "System Resource Table")
        for item in self.__maximum_claim_table:
            print(
                f"P{self.__maximum_claim_table.index(item) + 1}"
                + " ".join(f"{it:>8}" for it in item)
                + "\n"
            )
        print(
            "Current Usage by Active Processes: "
            + " ".join(str(x) for x in self.__claim_vector)
        )
        print(
            "Initial Available Resources: "
            + " ".join(str(x) for x in self.__available_resources())
        )
        time.sleep(1)
if __name__ == "__main__":
import doctest
doctest.testmod()
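    # A minimal usage sketch with the sample tables defined above:
    #   BankersAlgorithm(
    #       test_claim_vector, test_allocated_res_table, test_maximum_claim_table
    #   ).main(describe=True)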
| 294 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
"""configuration_falcon""": ["""FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP""", """FalconConfig"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_falcon"] = [
"""FALCON_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""FalconForCausalLM""",
"""FalconModel""",
"""FalconPreTrainedModel""",
"""FalconForSequenceClassification""",
"""FalconForTokenClassification""",
"""FalconForQuestionAnswering""",
]
if TYPE_CHECKING:
from .configuration_falcon import FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP, FalconConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_falcon import (
FALCON_PRETRAINED_MODEL_ARCHIVE_LIST,
FalconForCausalLM,
FalconForQuestionAnswering,
FalconForSequenceClassification,
FalconForTokenClassification,
FalconModel,
FalconPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 215 |
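# Behaviour sketch for the Falcon lazy module above (not part of the original
# file): importing the package only builds the name table; e.g. `FalconModel`
# is resolved -- and torch actually imported -- the first time the attribute
# is accessed.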
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
logger = logging.get_logger(__name__)
if is_vision_available():
import PIL
class CLIPImageProcessor(BaseImageProcessor):
    r"""Constructs a CLIP image processor."""

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        do_convert_rgb: bool = True,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, default_to_square=True, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
        self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD
        self.do_convert_rgb = do_convert_rgb

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}")
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` parameter must contain the keys (height, width). Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        do_convert_rgb: bool = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Optional[ChannelDimension] = ChannelDimension.FIRST,
        **kwargs,
    ) -> PIL.Image.Image:
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size, param_name="size", default_to_square=False)
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size", default_to_square=True)
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")

        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            images = [convert_to_rgb(image) for image in images]

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
| 215 | 1 |
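# A minimal usage sketch for the CLIP image processor defined above (the image
# path is an illustrative placeholder):
#   from PIL import Image
#   processor = CLIPImageProcessor()
#   inputs = processor(images=Image.open("cat.png"), return_tensors="np")
#   inputs["pixel_values"].shape  # -> (1, 3, 224, 224)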
import importlib
import inspect
import json
import os
import re
import shutil
import sys
from pathlib import Path
from typing import Dict, Optional, Union
from urllib import request
from huggingface_hub import HfFolder, cached_download, hf_hub_download, model_info
from packaging import version
from .. import __version__
from . import DIFFUSERS_DYNAMIC_MODULE_NAME, HF_MODULES_CACHE, logging
COMMUNITY_PIPELINES_URL = (
    "https://raw.githubusercontent.com/huggingface/diffusers/{revision}/examples/community/{pipeline}.py"
)
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
def lowerCamelCase__ ( ):
SCREAMING_SNAKE_CASE : int = "https://pypi.org/pypi/diffusers/json"
SCREAMING_SNAKE_CASE : Tuple = json.loads(request.urlopen(_a).read())["releases"].keys()
return sorted(_a , key=lambda _a: version.Version(_a))
def lowerCamelCase__ ( ):
# This function has already been executed if HF_MODULES_CACHE already is in the Python path.
if HF_MODULES_CACHE in sys.path:
return
sys.path.append(_a)
os.makedirs(_a , exist_ok=_a)
SCREAMING_SNAKE_CASE : str = Path(_a) / "__init__.py"
if not init_path.exists():
init_path.touch()
def lowerCamelCase__ ( _a):
init_hf_modules()
SCREAMING_SNAKE_CASE : Optional[int] = Path(_a) / name
# If the parent module does not exist yet, recursively create it.
if not dynamic_module_path.parent.exists():
create_dynamic_module(dynamic_module_path.parent)
os.makedirs(_a , exist_ok=_a)
SCREAMING_SNAKE_CASE : List[Any] = dynamic_module_path / "__init__.py"
if not init_path.exists():
init_path.touch()
def lowerCamelCase__ ( _a):
with open(_a , "r" , encoding="utf-8") as f:
SCREAMING_SNAKE_CASE : Union[str, Any] = f.read()
# Imports of the form `import .xxx`
SCREAMING_SNAKE_CASE : Optional[Any] = re.findall("^\s*import\s+\.(\S+)\s*$" , _a , flags=re.MULTILINE)
# Imports of the form `from .xxx import yyy`
relative_imports += re.findall("^\s*from\s+\.(\S+)\s+import" , _a , flags=re.MULTILINE)
# Unique-ify
return list(set(_a))
def lowerCamelCase__ ( _a):
SCREAMING_SNAKE_CASE : List[str] = False
SCREAMING_SNAKE_CASE : Union[str, Any] = [module_file]
SCREAMING_SNAKE_CASE : Dict = []
# Let's recurse through all relative imports
while not no_change:
SCREAMING_SNAKE_CASE : List[str] = []
for f in files_to_check:
new_imports.extend(get_relative_imports(_a))
SCREAMING_SNAKE_CASE : Union[str, Any] = Path(_a).parent
SCREAMING_SNAKE_CASE : Union[str, Any] = [str(module_path / m) for m in new_imports]
SCREAMING_SNAKE_CASE : Union[str, Any] = [f for f in new_import_files if f not in all_relative_imports]
SCREAMING_SNAKE_CASE : Optional[Any] = [f"{f}.py" for f in new_import_files]
SCREAMING_SNAKE_CASE : Dict = len(_a) == 0
all_relative_imports.extend(_a)
return all_relative_imports
def lowerCamelCase__ ( _a):
with open(_a , "r" , encoding="utf-8") as f:
SCREAMING_SNAKE_CASE : List[str] = f.read()
# Imports of the form `import xxx`
SCREAMING_SNAKE_CASE : Optional[int] = re.findall("^\s*import\s+(\S+)\s*$" , _a , flags=re.MULTILINE)
# Imports of the form `from xxx import yyy`
imports += re.findall("^\s*from\s+(\S+)\s+import" , _a , flags=re.MULTILINE)
# Only keep the top-level module
SCREAMING_SNAKE_CASE : Optional[int] = [imp.split(".")[0] for imp in imports if not imp.startswith(".")]
# Unique-ify and test we got them all
SCREAMING_SNAKE_CASE : List[str] = list(set(_a))
SCREAMING_SNAKE_CASE : List[str] = []
for imp in imports:
try:
importlib.import_module(_a)
except ImportError:
missing_packages.append(_a)
if len(_a) > 0:
raise ImportError(
"This modeling file requires the following packages that were not found in your environment: "
f"{', '.join(_a)}. Run `pip install {' '.join(_a)}`")
return get_relative_imports(_a)
def lowerCamelCase__ ( _a , _a):
SCREAMING_SNAKE_CASE : Optional[int] = module_path.replace(os.path.sep , ".")
SCREAMING_SNAKE_CASE : Union[str, Any] = importlib.import_module(_a)
if class_name is None:
return find_pipeline_class(_a)
return getattr(_a , _a)
def lowerCamelCase__ ( _a):
from ..pipelines import DiffusionPipeline
SCREAMING_SNAKE_CASE : Optional[int] = dict(inspect.getmembers(_a , inspect.isclass))
SCREAMING_SNAKE_CASE : Union[str, Any] = None
for cls_name, cls in cls_members.items():
if (
cls_name != DiffusionPipeline.__name__
and issubclass(cls , _a)
and cls.__module__.split(".")[0] != "diffusers"
):
if pipeline_class is not None:
raise ValueError(
f"Multiple classes that inherit from {DiffusionPipeline.__name__} have been found:"
f" {pipeline_class.__name__}, and {cls_name}. Please make sure to define only one in"
f" {loaded_module}.")
SCREAMING_SNAKE_CASE : Any = cls
return pipeline_class
def get_cached_module_file(pretrained_model_name_or_path, module_file, cache_dir=None, force_download=False, resume_download=False, proxies=None, use_auth_token=None, revision=None, local_files_only=False):
    pretrained_model_name_or_path = str(pretrained_model_name_or_path)
    module_file_or_url = os.path.join(pretrained_model_name_or_path, module_file)
    if os.path.isfile(module_file_or_url):
        resolved_module_file = module_file_or_url
        submodule = "local"
    elif pretrained_model_name_or_path.count("/") == 0:
        available_versions = get_diffusers_versions()
        # cut ".dev0"
        latest_version = "v" + ".".join(__version__.split(".")[:3])
        # retrieve github version that matches
        if revision is None:
            revision = latest_version if latest_version[1:] in available_versions else "main"
            logger.info(f"Defaulting to latest_version: {revision}.")
        elif revision in available_versions:
            revision = f"v{revision}"
        elif revision == "main":
            revision = revision
        else:
            raise ValueError(
                f"`custom_revision`: {revision} does not exist. Please make sure to choose one of"
                f" {', '.join(available_versions + ['main'])}.")
        # community pipeline on GitHub
        github_url = COMMUNITY_PIPELINES_URL.format(revision=revision, pipeline=pretrained_model_name_or_path)
        try:
            resolved_module_file = cached_download(
                github_url, cache_dir=cache_dir, force_download=force_download, proxies=proxies, resume_download=resume_download, local_files_only=local_files_only, use_auth_token=False, )
            submodule = "git"
            module_file = pretrained_model_name_or_path + ".py"
        except EnvironmentError:
            logger.error(f"Could not locate the {module_file} inside {pretrained_model_name_or_path}.")
            raise
    else:
        try:
            # Load from URL or cache if already cached
            resolved_module_file = hf_hub_download(
                pretrained_model_name_or_path, module_file, cache_dir=cache_dir, force_download=force_download, proxies=proxies, resume_download=resume_download, local_files_only=local_files_only, use_auth_token=use_auth_token, )
            submodule = os.path.join("local", "--".join(pretrained_model_name_or_path.split("/")))
        except EnvironmentError:
            logger.error(f"Could not locate the {module_file} inside {pretrained_model_name_or_path}.")
            raise
    # Check we have all the requirements in our environment
    modules_needed = check_imports(resolved_module_file)
    # Now we move the module inside our cached dynamic modules.
    full_submodule = DIFFUSERS_DYNAMIC_MODULE_NAME + os.path.sep + submodule
    create_dynamic_module(full_submodule)
    submodule_path = Path(HF_MODULES_CACHE) / full_submodule
    if submodule == "local" or submodule == "git":
        # We always copy local files (we could hash the file to see if there was a change, and give them the name of
        # that hash, to only copy when there is a modification but it seems overkill for now).
        # The only reason we do the copy is to avoid putting too many folders in sys.path.
        shutil.copy(resolved_module_file, submodule_path / module_file)
        for module_needed in modules_needed:
            module_needed = f"{module_needed}.py"
            shutil.copy(os.path.join(pretrained_model_name_or_path, module_needed), submodule_path / module_needed)
    else:
        # Get the commit hash
        # TODO: we will get this info in the etag soon, so retrieve it from there and not here.
        if isinstance(use_auth_token, str):
            token = use_auth_token
        elif use_auth_token is True:
            token = HfFolder.get_token()
        else:
            token = None
        commit_hash = model_info(pretrained_model_name_or_path, revision=revision, token=token).sha
        # The module file will end up being placed in a subfolder with the git hash of the repo. This way we get the
        # benefit of versioning.
        submodule_path = submodule_path / commit_hash
        full_submodule = full_submodule + os.path.sep + commit_hash
        create_dynamic_module(full_submodule)
        if not (submodule_path / module_file).exists():
            shutil.copy(resolved_module_file, submodule_path / module_file)
        # Make sure we also have every file with relative
        for module_needed in modules_needed:
            if not (submodule_path / module_needed).exists():
                get_cached_module_file(
                    pretrained_model_name_or_path, f"{module_needed}.py", cache_dir=cache_dir, force_download=force_download, resume_download=resume_download, proxies=proxies, use_auth_token=use_auth_token, revision=revision, local_files_only=local_files_only, )
    return os.path.join(full_submodule, module_file)
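# Convenience wrapper: cache the module file, then import it and return the requested class.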
def get_class_from_dynamic_module(pretrained_model_name_or_path, module_file, class_name=None, cache_dir=None, force_download=False, resume_download=False, proxies=None, use_auth_token=None, revision=None, local_files_only=False, **kwargs):
    final_module = get_cached_module_file(
        pretrained_model_name_or_path, module_file, cache_dir=cache_dir, force_download=force_download, resume_download=resume_download, proxies=proxies, use_auth_token=use_auth_token, revision=revision, local_files_only=local_files_only, )
    return get_class_in_module(class_name, final_module.replace(".py", "")) | 76 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
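# Import structure for lazy loading: each optional backend contributes its symbols only
# when the corresponding dependency is available.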
_import_structure = {
    'configuration_rembert': ['REMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'RemBertConfig', 'RemBertOnnxConfig']
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_rembert'] = ['RemBertTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_rembert_fast'] = ['RemBertTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_rembert'] = [
'REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'RemBertForCausalLM',
'RemBertForMaskedLM',
'RemBertForMultipleChoice',
'RemBertForQuestionAnswering',
'RemBertForSequenceClassification',
'RemBertForTokenClassification',
'RemBertLayer',
'RemBertModel',
'RemBertPreTrainedModel',
'load_tf_weights_in_rembert',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_rembert'] = [
'TF_REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFRemBertForCausalLM',
'TFRemBertForMaskedLM',
'TFRemBertForMultipleChoice',
'TFRemBertForQuestionAnswering',
'TFRemBertForSequenceClassification',
'TFRemBertForTokenClassification',
'TFRemBertLayer',
'TFRemBertModel',
'TFRemBertPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_rembert import REMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RemBertConfig, RemBertOnnxConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_rembert import RemBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_rembert_fast import RemBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_rembert import (
REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
RemBertForCausalLM,
RemBertForMaskedLM,
RemBertForMultipleChoice,
RemBertForQuestionAnswering,
RemBertForSequenceClassification,
RemBertForTokenClassification,
RemBertLayer,
RemBertModel,
RemBertPreTrainedModel,
load_tf_weights_in_rembert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_rembert import (
TF_REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRemBertForCausalLM,
TFRemBertForMaskedLM,
TFRemBertForMultipleChoice,
TFRemBertForQuestionAnswering,
TFRemBertForSequenceClassification,
TFRemBertForTokenClassification,
TFRemBertLayer,
TFRemBertModel,
TFRemBertPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 30 | 0 |
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import PoolFormerConfig, PoolFormerForImageClassification, PoolFormerImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
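# Renames one checkpoint key by shifting its block index down by the patch-embedding offset.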
def replace_key_with_offset(key, offset, original_name, new_name):
    """Replaces the key by subtracting the offset from the original block number."""
    to_find = original_name.split(".")[0]
    key_list = key.split(".")
    orig_block_num = int(key_list[key_list.index(to_find) - 2])
    layer_num = int(key_list[key_list.index(to_find) - 1])
    new_block_num = orig_block_num - offset
    key = key.replace(f'{orig_block_num}.{layer_num}.{original_name}', f'block.{new_block_num}.{layer_num}.{new_name}')
    return key
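# Walks the whole original state dict and renames every key to the Hugging Face layout.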
def rename_keys(state_dict):
    new_state_dict = OrderedDict()
    total_embed_found, patch_emb_offset = 0, 0
    for key, value in state_dict.items():
        if key.startswith("network"):
            key = key.replace("network", "poolformer.encoder")
        if "proj" in key:
            # Works for the first embedding as well as the internal embedding layers
            if key.endswith("bias") and "patch_embed" not in key:
                patch_emb_offset += 1
            to_replace = key[: key.find("proj")]
            key = key.replace(to_replace, f'patch_embeddings.{total_embed_found}.')
            key = key.replace("proj", "projection")
            if key.endswith("bias"):
                total_embed_found += 1
        if "patch_embeddings" in key:
            key = "poolformer.encoder." + key
        if "mlp.fc1" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "mlp.fc1", "output.conv1")
        if "mlp.fc2" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "mlp.fc2", "output.conv2")
        if "norm1" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "norm1", "before_norm")
        if "norm2" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "norm2", "after_norm")
        if "layer_scale_1" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "layer_scale_1", "layer_scale_1")
        if "layer_scale_2" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "layer_scale_2", "layer_scale_2")
        if "head" in key:
            key = key.replace("head", "classifier")
        new_state_dict[key] = value
    return new_state_dict
def prepare_img():
    """Prepare a test image (the standard COCO image of two cats)."""
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url, stream=True).raw)
    return image
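# End-to-end conversion: build the config for the given size, rename the weights, sanity-check
# a slice of the logits against known values, then save the model and image processor.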
@torch.no_grad()
def convert_poolformer_checkpoint(model_name, checkpoint_path, pytorch_dump_folder_path):
    config = PoolFormerConfig()
    # set attributes based on model_name
    repo_id = "huggingface/label-files"
    size = model_name[-3:]
    config.num_labels = 1_000
    filename = "imagenet-1k-id2label.json"
    expected_shape = (1, 1_000)
    # set config attributes
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    if size == "s12":
        config.depths = [2, 2, 6, 2]
        config.hidden_sizes = [64, 128, 320, 512]
        config.mlp_ratio = 4.0
        crop_pct = 0.9
    elif size == "s24":
        config.depths = [4, 4, 12, 4]
        config.hidden_sizes = [64, 128, 320, 512]
        config.mlp_ratio = 4.0
        crop_pct = 0.9
    elif size == "s36":
        config.depths = [6, 6, 18, 6]
        config.hidden_sizes = [64, 128, 320, 512]
        config.mlp_ratio = 4.0
        config.layer_scale_init_value = 1e-6
        crop_pct = 0.9
    elif size == "m36":
        config.depths = [6, 6, 18, 6]
        config.hidden_sizes = [96, 192, 384, 768]
        config.mlp_ratio = 4.0
        config.layer_scale_init_value = 1e-6
        crop_pct = 0.9_5
    elif size == "m48":
        config.depths = [8, 8, 24, 8]
        config.hidden_sizes = [96, 192, 384, 768]
        config.mlp_ratio = 4.0
        config.layer_scale_init_value = 1e-6
        crop_pct = 0.9_5
    else:
        raise ValueError(F'Size {size} not supported')
    # load image processor
    image_processor = PoolFormerImageProcessor(crop_pct=crop_pct)
    # Prepare image
    image = prepare_img()
    pixel_values = image_processor(images=image, return_tensors="pt").pixel_values
    logger.info(F'Converting model {model_name}...')
    # load original state dict
    state_dict = torch.load(checkpoint_path, map_location=torch.device("cpu"))
    # rename keys
    state_dict = rename_keys(state_dict)
    # create HuggingFace model and load state dict
    model = PoolFormerForImageClassification(config)
    model.load_state_dict(state_dict)
    model.eval()
    # Define image processor
    image_processor = PoolFormerImageProcessor(crop_pct=crop_pct)
    pixel_values = image_processor(images=prepare_img(), return_tensors="pt").pixel_values
    # forward pass
    outputs = model(pixel_values)
    logits = outputs.logits
    # define expected logit slices for different models
    if size == "s12":
        expected_slice = torch.tensor([-0.3_0_4_5, -0.6_7_5_8, -0.4_8_6_9])
    elif size == "s24":
        expected_slice = torch.tensor([0.4_4_0_2, -0.1_3_7_4, -0.8_0_4_5])
    elif size == "s36":
        expected_slice = torch.tensor([-0.6_0_8_0, -0.5_1_3_3, -0.5_8_9_8])
    elif size == "m36":
        expected_slice = torch.tensor([0.3_9_5_2, 0.2_2_6_3, -1.2_6_6_8])
    elif size == "m48":
        expected_slice = torch.tensor([0.1_1_6_7, -0.0_6_5_6, -0.3_4_2_3])
    else:
        raise ValueError(F'Size {size} not supported')
    # verify logits
    assert logits.shape == expected_shape
    assert torch.allclose(logits[0, :3], expected_slice, atol=1e-2)
    # finally, save model and image processor
    logger.info(F'Saving PyTorch model and image processor to {pytorch_dump_folder_path}...')
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
    print(F'Saving image processor to {pytorch_dump_folder_path}')
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'--model_name',
default='poolformer_s12',
type=str,
help='Name of the model you\'d like to convert.',
)
parser.add_argument(
'--checkpoint_path', default=None, type=str, help='Path to the original PyTorch checkpoint (.pth file).'
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output PyTorch model.'
)
    args = parser.parse_args()
convert_poolformer_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path) | 151 |
import random
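# Erdos-Renyi style generator: every possible edge is added independently with the given probability.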
def random_graph(vertices_number: int, probability: float, directed: bool = False) -> dict:
    """Generate a random graph as an adjacency dict over `vertices_number` nodes."""
    graph: dict = {i: [] for i in range(vertices_number)}
    # if probability is greater or equal than 1, then generate a complete graph
    if probability >= 1:
        return complete_graph(vertices_number)
    # if probability is lower or equal than 0, then return a graph without edges
    if probability <= 0:
        return graph
    # for each couple of nodes, add an edge from i to j
    # if the randomly generated number is lower than the given probability
    for i in range(vertices_number):
        for j in range(i + 1, vertices_number):
            if random.random() < probability:
                graph[i].append(j)
                if not directed:
                    # if the graph is undirected, add an edge from j to i as well
                    graph[j].append(i)
    return graph


def complete_graph(vertices_number: int) -> dict:
    """Generate a complete graph with `vertices_number` vertices."""
    return {
        i: [j for j in range(vertices_number) if i != j] for i in range(vertices_number)
    }
if __name__ == "__main__":
import doctest
doctest.testmod() | 151 | 1 |
"""simple docstring"""
import warnings
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

SEGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"nvidia/segformer-b0-finetuned-ade-512-512": (
"https://huggingface.co/nvidia/segformer-b0-finetuned-ade-512-512/resolve/main/config.json"
),
# See all SegFormer models at https://huggingface.co/models?filter=segformer
}
class SegformerConfig(PretrainedConfig):
    model_type = 'segformer'

    def __init__(self, num_channels=3, num_encoder_blocks=4, depths=[2, 2, 2, 2], sr_ratios=[8, 4, 2, 1], hidden_sizes=[32, 64, 160, 256], patch_sizes=[7, 3, 3, 3], strides=[4, 2, 2, 2], num_attention_heads=[1, 2, 5, 8], mlp_ratios=[4, 4, 4, 4], hidden_act="gelu", hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, classifier_dropout_prob=0.1, initializer_range=0.02, drop_path_rate=0.1, layer_norm_eps=1E-6, decoder_hidden_size=256, semantic_loss_ignore_index=255, **kwargs, ):
        super().__init__(**kwargs)
        if "reshape_last_stage" in kwargs and kwargs["reshape_last_stage"] is False:
            warnings.warn(
                """Reshape_last_stage is set to False in this config. This argument is deprecated and will soon be"""
                """ removed, as the behaviour will default to that of reshape_last_stage = True.""", FutureWarning, )
        self.num_channels = num_channels
        self.num_encoder_blocks = num_encoder_blocks
        self.depths = depths
        self.sr_ratios = sr_ratios
        self.hidden_sizes = hidden_sizes
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.mlp_ratios = mlp_ratios
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.classifier_dropout_prob = classifier_dropout_prob
        self.initializer_range = initializer_range
        self.drop_path_rate = drop_path_rate
        self.layer_norm_eps = layer_norm_eps
        self.decoder_hidden_size = decoder_hidden_size
        self.reshape_last_stage = kwargs.get("""reshape_last_stage""", True)
        self.semantic_loss_ignore_index = semantic_loss_ignore_index
class SegformerOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse('1.11')

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
            ] )

    @property
    def atol_for_validation(self) -> float:
        return 1E-4

    @property
    def default_onnx_opset(self) -> int:
        return 12
| 109 |
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class BaseTransformersCLICommand(ABC):
    @staticmethod
    @abstractmethod
    def register_subcommand(parser: ArgumentParser):
        raise NotImplementedError()

    @abstractmethod
    def run(self):
        raise NotImplementedError()
| 285 | 0 |
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import is_flaky, require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DonutImageProcessor
class DonutImageProcessingTester(unittest.TestCase):
    def __init__(self, parent, batch_size=7, num_channels=3, image_size=18, min_resolution=30, max_resolution=400, do_resize=True, size=None, do_thumbnail=True, do_align_axis=False, do_pad=True, do_normalize=True, image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5], ) -> None:
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size if size is not None else {"height": 18, "width": 20}
        self.do_thumbnail = do_thumbnail
        self.do_align_axis = do_align_axis
        self.do_pad = do_pad
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
    def prepare_image_processor_dict(self):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_thumbnail": self.do_thumbnail,
"do_align_long_axis": self.do_align_axis,
"do_pad": self.do_pad,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
}
@require_torch
@require_vision
class DonutImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = DonutImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = DonutImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_thumbnail"))
        self.assertTrue(hasattr(image_processing, "do_align_long_axis"))
        self.assertTrue(hasattr(image_processing, "do_pad"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 18, "width": 20})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42)
        self.assertEqual(image_processor.size, {"height": 42, "width": 42})

        # Previous config had dimensions in (width, height) order
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=(42, 84))
        self.assertEqual(image_processor.size, {"height": 84, "width": 42})
    def test_batch_feature(self):
        pass
@is_flaky()
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ), )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ), )
@is_flaky()
    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ), )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ), )
@is_flaky()
    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ), )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ), )
| 356 |
'''simple docstring'''
import logging
import os
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from tempfile import TemporaryDirectory
from typing import List, Optional
import faiss
import torch
from datasets import Features, Sequence, Value, load_dataset
from transformers import DPRContextEncoder, DPRContextEncoderTokenizerFast, HfArgumentParser
logger = logging.getLogger(__name__)
torch.set_grad_enabled(False)
device = "cuda" if torch.cuda.is_available() else "cpu"
def split_text(text: str, n: int = 100, character: str = " ") -> List[str]:
    """Split the text every ``n``-th occurrence of ``character``"""
    text = text.split(character)
    return [character.join(text[i : i + n]).strip() for i in range(0, len(text), n)]
def split_documents(documents: dict) -> dict:
    """Split documents into passages"""
    titles, texts = [], []
    for title, text in zip(documents["title"], documents["text"]):
        if text is not None:
            for passage in split_text(text):
                titles.append(title if title is not None else "")
                texts.append(passage)
    return {"title": titles, "text": texts}
def embed(documents: dict, ctx_encoder: DPRContextEncoder, ctx_tokenizer: DPRContextEncoderTokenizerFast) -> dict:
    """Compute the DPR embeddings of document passages"""
    input_ids = ctx_tokenizer(
        documents["title"], documents["text"], truncation=True, padding="longest", return_tensors="pt")["input_ids"]
    embeddings = ctx_encoder(input_ids.to(device=device), return_dict=True).pooler_output
    return {"embeddings": embeddings.detach().cpu().numpy()}
def main(rag_example_args: "RagExampleArguments", processing_args: "ProcessingArguments", index_hnsw_args: "IndexHnswArguments", ):
    logger.info("Step 1 - Create the dataset")
    ######################################
    # The dataset needed for RAG must have three columns:
    # - title (string): title of the document
    # - text (string): text of a passage of the document
    # - embeddings (array of dimension d): DPR representation of the passage
    # Let's say you have documents in tab-separated csv files with columns "title" and "text"
    assert os.path.isfile(rag_example_args.csv_path), "Please provide a valid path to a csv file"
    # You can load a Dataset object this way
    dataset = load_dataset(
        "csv", data_files=[rag_example_args.csv_path], split="train", delimiter="\t", column_names=["title", "text"])
    # More info about loading csv files in the documentation: https://huggingface.co/docs/datasets/loading_datasets.html?highlight=csv#csv-files
    # Then split the documents into passages of 100 words
    dataset = dataset.map(split_documents, batched=True, num_proc=processing_args.num_proc)
    # And compute the embeddings
    ctx_encoder = DPRContextEncoder.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name).to(device=device)
    ctx_tokenizer = DPRContextEncoderTokenizerFast.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name)
    new_features = Features(
        {"text": Value("string"), "title": Value("string"), "embeddings": Sequence(Value("float32"))} )  # optional, save as float32 instead of float64 to save space
    dataset = dataset.map(
        partial(embed, ctx_encoder=ctx_encoder, ctx_tokenizer=ctx_tokenizer), batched=True, batch_size=processing_args.batch_size, features=new_features, )
    # And finally save your dataset
    passages_path = os.path.join(rag_example_args.output_dir, "my_knowledge_dataset")
    dataset.save_to_disk(passages_path)
    # from datasets import load_from_disk
    # dataset = load_from_disk(passages_path)  # to reload the dataset
    ######################################
    logger.info("Step 2 - Index the dataset")
    ######################################
    # Let's use the Faiss implementation of HNSW for fast approximate nearest neighbor search
    index = faiss.IndexHNSWFlat(index_hnsw_args.d, index_hnsw_args.m, faiss.METRIC_INNER_PRODUCT)
    dataset.add_faiss_index("embeddings", custom_index=index)
    # And save the index
    index_path = os.path.join(rag_example_args.output_dir, "my_knowledge_dataset_hnsw_index.faiss")
    dataset.get_index("embeddings").save(index_path)
    # dataset.load_faiss_index("embeddings", index_path)  # to reload the index
@dataclass
class RagExampleArguments:
    csv_path: str = field(
        default=str(Path(__file__).parent / """test_run""" / """dummy-kb""" / """my_knowledge_dataset.csv"""), metadata={"""help""": """Path to a tab-separated csv file with columns 'title' and 'text'"""}, )
    question: Optional[str] = field(
        default=None, metadata={"""help""": """Question that is passed as input to RAG. Default is 'What does Moses' rod turn into ?'."""}, )
    rag_model_name: str = field(
        default="""facebook/rag-sequence-nq""", metadata={"""help""": """The RAG model to use. Either 'facebook/rag-sequence-nq' or 'facebook/rag-token-nq'"""}, )
    dpr_ctx_encoder_model_name: str = field(
        default="""facebook/dpr-ctx_encoder-multiset-base""", metadata={
            """help""": (
                """The DPR context encoder model to use. Either 'facebook/dpr-ctx_encoder-single-nq-base' or"""
                """ 'facebook/dpr-ctx_encoder-multiset-base'"""
            )
        }, )
    output_dir: Optional[str] = field(
        default=str(Path(__file__).parent / """test_run""" / """dummy-kb"""), metadata={"""help""": """Path to a directory where the dataset passages and the index will be saved"""}, )


@dataclass
class ProcessingArguments:
    num_proc: Optional[int] = field(
        default=None, metadata={
            """help""": """The number of processes to use to split the documents into passages. Default is single process."""
        }, )
    batch_size: int = field(
        default=16, metadata={
            """help""": """The batch size to use when computing the passages embeddings using the DPR context encoder."""
        }, )


@dataclass
class IndexHnswArguments:
    d: int = field(
        default=7_68, metadata={"""help""": """The dimension of the embeddings to pass to the HNSW Faiss index."""}, )
    m: int = field(
        default=1_28, metadata={
            """help""": (
                """The number of bi-directional links created for every new element during the HNSW index construction."""
            )
        }, )
if __name__ == "__main__":
logging.basicConfig(level=logging.WARNING)
logger.setLevel(logging.INFO)
    parser = HfArgumentParser((RagExampleArguments, ProcessingArguments, IndexHnswArguments))
    rag_example_args, processing_args, index_hnsw_args = parser.parse_args_into_dataclasses()
    with TemporaryDirectory() as tmp_dir:
        rag_example_args.output_dir = rag_example_args.output_dir or tmp_dir
main(rag_example_args, processing_args, index_hnsw_args)
| 142 | 0 |
from sklearn.metrics import mean_squared_error
import datasets
UpperCAmelCase__ = "\\n@article{scikit-learn,\n title={Scikit-learn: Machine Learning in {P}ython},\n author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.\n and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.\n and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and\n Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},\n journal={Journal of Machine Learning Research},\n volume={12},\n pages={2825--2830},\n year={2011}\n}\n"
UpperCAmelCase__ = "\\nMean Squared Error(MSE) is the average of the square of difference between the predicted\nand actual values.\n"
UpperCAmelCase__ = "\nArgs:\n predictions: array-like of shape (n_samples,) or (n_samples, n_outputs)\n Estimated target values.\n references: array-like of shape (n_samples,) or (n_samples, n_outputs)\n Ground truth (correct) target values.\n sample_weight: array-like of shape (n_samples,), default=None\n Sample weights.\n multioutput: {\"raw_values\", \"uniform_average\"} or array-like of shape (n_outputs,), default=\"uniform_average\"\n Defines aggregating of multiple output values. Array-like value defines weights used to average errors.\n\n \"raw_values\" : Returns a full set of errors in case of multioutput input.\n\n \"uniform_average\" : Errors of all outputs are averaged with uniform weight.\n\n squared : bool, default=True\n If True returns MSE value, if False returns RMSE (Root Mean Squared Error) value.\n\nReturns:\n mse : mean squared error.\nExamples:\n\n >>> mse_metric = datasets.load_metric(\"mse\")\n >>> predictions = [2.5, 0.0, 2, 8]\n >>> references = [3, -0.5, 2, 7]\n >>> results = mse_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'mse': 0.375}\n >>> rmse_result = mse_metric.compute(predictions=predictions, references=references, squared=False)\n >>> print(rmse_result)\n {'mse': 0.6123724356957945}\n\n If you're using multi-dimensional lists, then set the config as follows :\n\n >>> mse_metric = datasets.load_metric(\"mse\", \"multilist\")\n >>> predictions = [[0.5, 1], [-1, 1], [7, -6]]\n >>> references = [[0, 2], [-1, 2], [8, -5]]\n >>> results = mse_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'mse': 0.7083333333333334}\n >>> results = mse_metric.compute(predictions=predictions, references=references, multioutput='raw_values')\n >>> print(results) # doctest: +NORMALIZE_WHITESPACE\n {'mse': array([0.41666667, 1. ])}\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Mse(datasets.Metric):
    def _info(self):
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(self._get_feature_types()) , reference_urls=[
'https://scikit-learn.org/stable/modules/generated/sklearn.metrics.mean_squared_error.html'
] , )
    def _get_feature_types(self):
"""simple docstring"""
if self.config_name == "multilist":
return {
"predictions": datasets.Sequence(datasets.Value('float')),
"references": datasets.Sequence(datasets.Value('float')),
}
else:
return {
"predictions": datasets.Value('float'),
"references": datasets.Value('float'),
}
    def _compute(self, predictions, references, sample_weight=None, multioutput="uniform_average", squared=True):
        mse = mean_squared_error(
            references, predictions, sample_weight=sample_weight, multioutput=multioutput, squared=squared)
        return {"mse": mse}
| 339 |
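# Project Euler 191: count 30-day attendance strings with fewer than two absences and no
# three consecutive late days, via memoised recursion over (days, absent, late); solution() == 1918080160.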
cache = {}


def _calculate(days: int, absent: int, late: int) -> int:
    # if we are absent twice, or late 3 consecutive days,
    # no further prize strings are possible
    if late == 3 or absent == 2:
        return 0
    # if we have no days left, and have not failed any other rules,
    # we have a prize string
    if days == 0:
        return 1
    # No easy solution, so now we need to do the recursive calculation
    # First, check if the combination is already in the cache, and
    # if yes, return the stored value from there since we already
    # know the number of possible prize strings from this point on
    key = (days, absent, late)
    if key in cache:
        return cache[key]
    # now we calculate the three possible ways that can unfold from
    # this point on, depending on our attendance today
    # 1) if we are late (but not absent), the "absent" counter stays as
    # it is, but the "late" counter increases by one
    state_late = _calculate(days - 1, absent, late + 1)
    # 2) if we are absent, the "absent" counter increases by 1, and the
    # "late" counter resets to 0
    state_absent = _calculate(days - 1, absent + 1, 0)
    # 3) if we are on time, this resets the "late" counter and keeps the
    # absent counter
    state_ontime = _calculate(days - 1, absent, 0)
    prizestrings = state_late + state_absent + state_ontime
    cache[key] = prizestrings
    return prizestrings


def solution(days: int = 30) -> int:
    return _calculate(days, absent=0, late=0)
if __name__ == "__main__":
print(solution())
| 339 | 1 |
'''simple docstring'''
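# Hard-coded descending denoising timestep schedules (999 down to 0) of varying density;
# shorter schedules trade sample quality for sampling speed.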
UpperCamelCase_ : List[Any] = [
999,
800,
799,
600,
599,
500,
400,
399,
377,
355,
333,
311,
288,
266,
244,
222,
200,
199,
177,
155,
133,
111,
88,
66,
44,
22,
0,
]
UpperCamelCase_ : Union[str, Any] = [
999,
976,
952,
928,
905,
882,
858,
857,
810,
762,
715,
714,
572,
429,
428,
286,
285,
238,
190,
143,
142,
118,
95,
71,
47,
24,
0,
]
UpperCamelCase_ : Union[str, Any] = [
999,
988,
977,
966,
955,
944,
933,
922,
911,
900,
899,
879,
859,
840,
820,
800,
799,
766,
733,
700,
699,
650,
600,
599,
500,
499,
400,
399,
350,
300,
299,
266,
233,
200,
199,
179,
159,
140,
120,
100,
99,
88,
77,
66,
55,
44,
33,
22,
11,
0,
]
UpperCamelCase_ : Union[str, Any] = [
999,
995,
992,
989,
985,
981,
978,
975,
971,
967,
964,
961,
957,
956,
951,
947,
942,
937,
933,
928,
923,
919,
914,
913,
908,
903,
897,
892,
887,
881,
876,
871,
870,
864,
858,
852,
846,
840,
834,
828,
827,
820,
813,
806,
799,
792,
785,
784,
777,
770,
763,
756,
749,
742,
741,
733,
724,
716,
707,
699,
698,
688,
677,
666,
656,
655,
645,
634,
623,
613,
612,
598,
584,
570,
569,
555,
541,
527,
526,
505,
484,
483,
462,
440,
439,
396,
395,
352,
351,
308,
307,
264,
263,
220,
219,
176,
132,
88,
44,
0,
]
UpperCamelCase_ : Optional[int] = [
999,
997,
995,
992,
990,
988,
986,
984,
981,
979,
977,
975,
972,
970,
968,
966,
964,
961,
959,
957,
956,
954,
951,
949,
946,
944,
941,
939,
936,
934,
931,
929,
926,
924,
921,
919,
916,
914,
913,
910,
907,
905,
902,
899,
896,
893,
891,
888,
885,
882,
879,
877,
874,
871,
870,
867,
864,
861,
858,
855,
852,
849,
846,
843,
840,
837,
834,
831,
828,
827,
824,
821,
817,
814,
811,
808,
804,
801,
798,
795,
791,
788,
785,
784,
780,
777,
774,
770,
766,
763,
760,
756,
752,
749,
746,
742,
741,
737,
733,
730,
726,
722,
718,
714,
710,
707,
703,
699,
698,
694,
690,
685,
681,
677,
673,
669,
664,
660,
656,
655,
650,
646,
641,
636,
632,
627,
622,
618,
613,
612,
607,
602,
596,
591,
586,
580,
575,
570,
569,
563,
557,
551,
545,
539,
533,
527,
526,
519,
512,
505,
498,
491,
484,
483,
474,
466,
457,
449,
440,
439,
428,
418,
407,
396,
395,
381,
366,
352,
351,
330,
308,
307,
286,
264,
263,
242,
220,
219,
176,
175,
132,
131,
88,
44,
0,
]
UpperCamelCase_ : Tuple = [
999,
991,
982,
974,
966,
958,
950,
941,
933,
925,
916,
908,
900,
899,
874,
850,
825,
800,
799,
700,
600,
500,
400,
300,
200,
100,
0,
]
UpperCamelCase_ : List[Any] = [
999,
992,
985,
978,
971,
964,
957,
949,
942,
935,
928,
921,
914,
907,
900,
899,
879,
859,
840,
820,
800,
799,
766,
733,
700,
699,
650,
600,
599,
500,
499,
400,
399,
300,
299,
200,
199,
100,
99,
0,
]
UpperCamelCase_ : Dict = [
999,
996,
992,
989,
985,
982,
979,
975,
972,
968,
965,
961,
958,
955,
951,
948,
944,
941,
938,
934,
931,
927,
924,
920,
917,
914,
910,
907,
903,
900,
899,
891,
884,
876,
869,
861,
853,
846,
838,
830,
823,
815,
808,
800,
799,
788,
777,
766,
755,
744,
733,
722,
711,
700,
699,
688,
677,
666,
655,
644,
633,
622,
611,
600,
599,
585,
571,
557,
542,
528,
514,
500,
499,
485,
471,
457,
442,
428,
414,
400,
399,
379,
359,
340,
320,
300,
299,
279,
259,
240,
220,
200,
199,
166,
133,
100,
99,
66,
33,
0,
]
| 142 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''sayakpaul/vit-msn-base''': '''https://huggingface.co/sayakpaul/vit-msn-base/resolve/main/config.json''',
# See all ViT MSN models at https://huggingface.co/models?filter=vit_msn
}
class ViTMSNConfig(PretrainedConfig):
    model_type = """vit_msn"""

    def __init__(self, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3_072, hidden_act="gelu", hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, initializer_range=0.0_2, layer_norm_eps=1e-06, image_size=224, patch_size=16, num_channels=3, qkv_bias=True, **kwargs, ):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
| 142 | 1 |
'''simple docstring'''
import logging
import random
import ray
from transformers import RagConfig, RagRetriever, RagTokenizer
from transformers.models.rag.retrieval_rag import CustomHFIndex
logger = logging.getLogger(__name__)
class RayRetriever:
    def __init__(self):
        self.initialized = False

    def create_rag_retriever(self, config, question_encoder_tokenizer, generator_tokenizer, index):
        if not self.initialized:
            self.retriever = RagRetriever(
                config, question_encoder_tokenizer=question_encoder_tokenizer, generator_tokenizer=generator_tokenizer, index=index, init_retrieval=False, )
            self.initialized = True

    def init_retrieval(self):
        self.retriever.index.init_index()

    def retrieve(self, question_hidden_states, n_docs):
        doc_ids, retrieved_doc_embeds = self.retriever._main_retrieve(question_hidden_states, n_docs)
        return doc_ids, retrieved_doc_embeds
class RagRayDistributedRetriever(RagRetriever):
    def __init__(self, config, question_encoder_tokenizer, generator_tokenizer, retrieval_workers, index=None):
        if index is not None and index.is_initialized() and len(retrieval_workers) > 0:
            raise ValueError(
                """When using Ray for distributed fine-tuning, """
                """you'll need to provide the paths instead, """
                """as the dataset and the index are loaded """
                """separately. More info in examples/rag/use_own_knowledge_dataset.py """ )
        super().__init__(
            config, question_encoder_tokenizer=question_encoder_tokenizer, generator_tokenizer=generator_tokenizer, index=index, init_retrieval=False, )
        self.retrieval_workers = retrieval_workers
        if len(self.retrieval_workers) > 0:
            ray.get(
                [
                    worker.create_rag_retriever.remote(config, question_encoder_tokenizer, generator_tokenizer, index)
                    for worker in self.retrieval_workers
                ] )

    def init_retrieval(self):
        logger.info("""initializing retrieval""")
        if len(self.retrieval_workers) > 0:
            ray.get([worker.init_retrieval.remote() for worker in self.retrieval_workers])
        else:
            # Non-distributed training. Load index into this same process.
            self.index.init_index()

    def retrieve(self, question_hidden_states, n_docs):
        if len(self.retrieval_workers) > 0:
            # Select a random retrieval actor.
            random_worker = self.retrieval_workers[random.randint(0, len(self.retrieval_workers) - 1)]
            doc_ids, retrieved_doc_embeds = ray.get(random_worker.retrieve.remote(question_hidden_states, n_docs))
        else:
            doc_ids, retrieved_doc_embeds = self._main_retrieve(question_hidden_states, n_docs)
        return retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(doc_ids)

    @classmethod
    def get_tokenizers(cls, retriever_name_or_path, indexed_dataset=None, **kwargs):
        return super(RagRayDistributedRetriever, cls).get_tokenizers(retriever_name_or_path, indexed_dataset, **kwargs)

    @classmethod
    def from_pretrained(cls, retriever_name_or_path, actor_handles, indexed_dataset=None, **kwargs):
        config = kwargs.pop("""config""", None) or RagConfig.from_pretrained(retriever_name_or_path, **kwargs)
        rag_tokenizer = RagTokenizer.from_pretrained(retriever_name_or_path, config=config)
        question_encoder_tokenizer = rag_tokenizer.question_encoder
        generator_tokenizer = rag_tokenizer.generator
        if indexed_dataset is not None:
            config.index_name = """custom"""
            index = CustomHFIndex(config.retrieval_vector_size, indexed_dataset)
        else:
            index = cls._build_index(config)
        return cls(
            config, question_encoder_tokenizer=question_encoder_tokenizer, generator_tokenizer=generator_tokenizer, retrieval_workers=actor_handles, index=index, )
| 215 |
'''simple docstring'''
from __future__ import annotations
import typing
from collections import Counter
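# Project Euler 39: among all perimeters p <= 1000, find the one admitting the most
# integer-sided right triangles; the answer is solution(1000) == 840.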
def pythagorean_triple(max_perimeter: int) -> typing.Counter[int]:
    """Count, for every perimeter up to max_perimeter, the integer right triangles with that perimeter."""
    triplets: typing.Counter[int] = Counter()
    for base in range(1, max_perimeter + 1):
        for perpendicular in range(base, max_perimeter + 1):
            hypotenuse = (base * base + perpendicular * perpendicular) ** 0.5
            if hypotenuse == int(hypotenuse):
                perimeter = int(base + perpendicular + hypotenuse)
                if perimeter > max_perimeter:
                    continue
                triplets[perimeter] += 1
    return triplets


def solution(max_perimeter: int = 1000) -> int:
    triplets = pythagorean_triple(max_perimeter)
    return triplets.most_common(1)[0][0]
if __name__ == "__main__":
print(f"""Perimeter {solution()} has maximum solutions""")
| 215 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
"""configuration_swiftformer""": [
"""SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""SwiftFormerConfig""",
"""SwiftFormerOnnxConfig""",
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_swiftformer"] = [
"""SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""SwiftFormerForImageClassification""",
"""SwiftFormerModel""",
"""SwiftFormerPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_swiftformer import (
SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
SwiftFormerConfig,
SwiftFormerOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_swiftformer import (
SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
SwiftFormerForImageClassification,
SwiftFormerModel,
SwiftFormerPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 353 |
"""simple docstring"""
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNet2DModel
from ...schedulers import ScoreSdeVeScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
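# Unconditional image generation with the variance-exploding score SDE sampler:
# each denoising step runs Langevin corrector updates followed by a predictor step.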
class ScoreSdeVePipeline(DiffusionPipeline):
    unet: UNet2DModel
    scheduler: ScoreSdeVeScheduler

    def __init__(self, unet: UNet2DModel, scheduler: ScoreSdeVeScheduler):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(self, batch_size: int = 1, num_inference_steps: int = 2_000, generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, output_type: Optional[str] = "pil", return_dict: bool = True, **kwargs, ) -> Union[ImagePipelineOutput, Tuple]:
        img_size = self.unet.config.sample_size
        shape = (batch_size, 3, img_size, img_size)
        model = self.unet
        sample = randn_tensor(shape, generator=generator) * self.scheduler.init_noise_sigma
        sample = sample.to(self.device)
        self.scheduler.set_timesteps(num_inference_steps)
        self.scheduler.set_sigmas(num_inference_steps)
        for i, t in enumerate(self.progress_bar(self.scheduler.timesteps)):
            sigma_t = self.scheduler.sigmas[i] * torch.ones(shape[0], device=self.device)
            # correction step
            for _ in range(self.scheduler.config.correct_steps):
                model_output = self.unet(sample, sigma_t).sample
                sample = self.scheduler.step_correct(model_output, sample, generator=generator).prev_sample
            # prediction step
            model_output = model(sample, sigma_t).sample
            output = self.scheduler.step_pred(model_output, t, sample, generator=generator)
            sample, sample_mean = output.prev_sample, output.prev_sample_mean
        sample = sample_mean.clamp(0, 1)
        sample = sample.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            sample = self.numpy_to_pil(sample)
        if not return_dict:
            return (sample,)
        return ImagePipelineOutput(images=sample)
| 234 | 0 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_perceiver import PerceiverImageProcessor
lowercase__ = logging.get_logger(__name__)
class PerceiverFeatureExtractor(PerceiverImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            'The class PerceiverFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'
            ' Please use PerceiverImageProcessor instead.', FutureWarning, )
        super().__init__(*args, **kwargs)
| 151 |
'''simple docstring'''
from __future__ import annotations
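# Splits `number_of_bytes` into `partitions` contiguous 1-indexed byte ranges (e.g. for parallel
# downloads); the last partition absorbs the remainder: allocation_num(16647, 4)
# -> ['1-4161', '4162-8322', '8323-12483', '12484-16647'].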
def allocation_num(number_of_bytes: int, partitions: int) -> list[str]:
    if partitions <= 0:
        raise ValueError('partitions must be a positive number!')
    if partitions > number_of_bytes:
        raise ValueError('partitions can not > number_of_bytes!')
    bytes_per_partition = number_of_bytes // partitions
    allocation_list = []
    for i in range(partitions):
        start_bytes = i * bytes_per_partition + 1
        end_bytes = (
            number_of_bytes if i == partitions - 1 else (i + 1) * bytes_per_partition
        )
        allocation_list.append(F"""{start_bytes}-{end_bytes}""")
    return allocation_list
if __name__ == "__main__":
import doctest
doctest.testmod()
| 151 | 1 |
"""simple docstring"""
from __future__ import annotations
Path = list[tuple[int, int]]

grid = [
[0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0],
[1, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0],
]
delta = ([-1, 0], [0, -1], [1, 0], [0, 1])  # up, left, down, right
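# Greedy best-first search: the frontier is ordered purely by the Manhattan-distance heuristic,
# so the returned path is not guaranteed to be optimal.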
class Node:
    def __init__(self, pos_x, pos_y, goal_x, goal_y, g_cost, parent):
        self.pos_x = pos_x
        self.pos_y = pos_y
        self.pos = (pos_y, pos_x)
        self.goal_x = goal_x
        self.goal_y = goal_y
        self.g_cost = g_cost
        self.parent = parent
        self.f_cost = self.calculate_heuristic()

    def calculate_heuristic(self) -> float:
        # Manhattan distance to the goal
        dy = abs(self.pos_x - self.goal_x)
        dx = abs(self.pos_y - self.goal_y)
        return dx + dy

    def __lt__(self, other) -> bool:
        return self.f_cost < other.f_cost
class GreedyBestFirst:
    def __init__(self, start, goal):
        self.start = Node(start[1], start[0], goal[1], goal[0], 0, None)
        self.target = Node(goal[1], goal[0], goal[1], goal[0], 99_999, None)
        self.open_nodes = [self.start]
        self.closed_nodes = []
        self.reached = False

    def search(self) -> Path | None:
        while self.open_nodes:
            # Open Nodes are sorted using __lt__
            self.open_nodes.sort()
            current_node = self.open_nodes.pop(0)
            if current_node.pos == self.target.pos:
                self.reached = True
                return self.retrace_path(current_node)
            self.closed_nodes.append(current_node)
            successors = self.get_successors(current_node)
            for child_node in successors:
                if child_node in self.closed_nodes:
                    continue
                if child_node not in self.open_nodes:
                    self.open_nodes.append(child_node)
                else:
                    # retrieve the best current path
                    better_node = self.open_nodes.pop(self.open_nodes.index(child_node))
                    if child_node.g_cost < better_node.g_cost:
                        self.open_nodes.append(child_node)
                    else:
                        self.open_nodes.append(better_node)
        if not self.reached:
            return [self.start.pos]
        return None

    def get_successors(self, parent):
        successors = []
        for action in delta:
            pos_x = parent.pos_x + action[1]
            pos_y = parent.pos_y + action[0]
            if not (0 <= pos_x <= len(grid[0]) - 1 and 0 <= pos_y <= len(grid) - 1):
                continue
            if grid[pos_y][pos_x] != 0:
                continue
            successors.append(
                Node(
                    pos_x, pos_y, self.target.pos_y, self.target.pos_x, parent.g_cost + 1, parent, ) )
        return successors

    def retrace_path(self, node):
        current_node = node
        path = []
        while current_node is not None:
            path.append((current_node.pos_y, current_node.pos_x))
            current_node = current_node.parent
        path.reverse()
        return path
if __name__ == "__main__":
    init = (0, 0)
    goal = (len(grid) - 1, len(grid[0]) - 1)
    for elem in grid:
        print(elem)
    print('''------''')
    greedy_bf = GreedyBestFirst(init, goal)
    path = greedy_bf.search()
    if path:
        for pos_x, pos_y in path:
            grid[pos_y][pos_x] = 2
for elem in grid:
print(elem)
| 364 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'''configuration_mctct''': ['''MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MCTCTConfig'''],
'''feature_extraction_mctct''': ['''MCTCTFeatureExtractor'''],
'''processing_mctct''': ['''MCTCTProcessor'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mctct"] = [
'''MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MCTCTForCTC''',
'''MCTCTModel''',
'''MCTCTPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_mctct import MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP, MCTCTConfig
from .feature_extraction_mctct import MCTCTFeatureExtractor
from .processing_mctct import MCTCTProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mctct import MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST, MCTCTForCTC, MCTCTModel, MCTCTPreTrainedModel
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__) | 340 | 0 |
import torch
from transformers import AutoModel
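# Few-shot NER: scores every query token as a span start/end by comparing its BERT embedding
# against the start/end marker tokens of the support examples.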
class FSNERModel(torch.nn.Module):
    def __init__(self, pretrained_model_name_or_path="sayef/fsner-bert-base-uncased"):
        super(FSNERModel, self).__init__()
        self.bert = AutoModel.from_pretrained(pretrained_model_name_or_path, return_dict=True)
        self.cos = torch.nn.CosineSimilarity(3, 1e-08)
        self.softmax = torch.nn.Softmax(dim=1)

    def BERT(self, **inputs):
        return self.bert(**inputs).last_hidden_state

    def VectorSum(self, token_embeddings):
        return token_embeddings.sum(2, keepdim=True)

    def Atten(self, q_rep, S_rep, T=1):
        return self.softmax(T * self.cos(q_rep, S_rep))

    def forward(self, W_query, W_supports):
        support_sizes = W_supports['''sizes'''].tolist()
        start_token_id = W_supports['''start_token_id'''].item()
        end_token_id = W_supports['''end_token_id'''].item()
        del W_supports["sizes"]
        del W_supports["start_token_id"]
        del W_supports["end_token_id"]
        q = self.BERT(**W_query)
        S = self.BERT(**W_supports)
        p_starts = None
        p_ends = None
        start_token_masks = W_supports['''input_ids'''] == start_token_id
        end_token_masks = W_supports['''input_ids'''] == end_token_id
        for i, size in enumerate(support_sizes):
            if i == 0:
                s = 0
            else:
                s = support_sizes[i - 1]
            s_start = S[s : s + size][start_token_masks[s : s + size]]
            s_end = S[s : s + size][end_token_masks[s : s + size]]
            p_start = torch.matmul(q[i], s_start.T).sum(1).softmax(0)
            p_end = torch.matmul(q[i], s_end.T).sum(1).softmax(0)
            if p_starts is not None:
                p_starts = torch.vstack((p_starts, p_start))
                p_ends = torch.vstack((p_ends, p_end))
            else:
                p_starts = p_start
                p_ends = p_end
        return p_starts, p_ends | 76 |
from typing import Any
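# Singly linked list whose swap_nodes exchanges the payloads of the nodes holding two given
# values; no pointers are rewired.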
class Node:
    def __init__(self, data: Any):
        self.data = data
        self.next = None


class LinkedList:
    def __init__(self):
        self.head = None

    def print_list(self):
        temp = self.head
        while temp is not None:
            print(temp.data, end=''' ''')
            temp = temp.next
        print()

    # adding nodes
    def push(self, new_data: Any):
        new_node = Node(new_data)
        new_node.next = self.head
        self.head = new_node

    # swapping nodes
    def swap_nodes(self, node_data_a, node_data_a2):
        if node_data_a == node_data_a2:
            return
        else:
            node_a = self.head
            while node_a is not None and node_a.data != node_data_a:
                node_a = node_a.next
            node_a2 = self.head
            while node_a2 is not None and node_a2.data != node_data_a2:
                node_a2 = node_a2.next
            if node_a is None or node_a2 is None:
                return
            node_a.data, node_a2.data = node_a2.data, node_a.data
if __name__ == "__main__":
    ll = LinkedList()
for i in range(5, 0, -1):
ll.push(i)
ll.print_list()
ll.swap_nodes(1, 4)
print('After swapping')
ll.print_list()
| 142 | 0 |
"""simple docstring"""
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
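# Processor pairing a Pix2Struct image processor with a T5 tokenizer; tokenized text targets
# are re-keyed to decoder_input_ids / decoder_attention_mask for seq2seq training.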
class lowerCAmelCase_ ( lowerCAmelCase ):
"""simple docstring"""
_lowerCAmelCase : List[str] = ["""image_processor""", """tokenizer"""]
_lowerCAmelCase : List[str] = """Pix2StructImageProcessor"""
_lowerCAmelCase : List[Any] = ("""T5Tokenizer""", """T5TokenizerFast""")
def __init__( self , lowerCAmelCase , lowerCAmelCase ):
"""simple docstring"""
snake_case = False
super().__init__(lowerCAmelCase , lowerCAmelCase )
def __call__( self , lowerCAmelCase=None , lowerCAmelCase = None , lowerCAmelCase = True , lowerCAmelCase = False , lowerCAmelCase = None , lowerCAmelCase = None , lowerCAmelCase = 20_48 , lowerCAmelCase = 0 , lowerCAmelCase = None , lowerCAmelCase = None , lowerCAmelCase = False , lowerCAmelCase = False , lowerCAmelCase = False , lowerCAmelCase = False , lowerCAmelCase = False , lowerCAmelCase = True , lowerCAmelCase = None , **lowerCAmelCase , ):
"""simple docstring"""
if images is None and text is None:
raise ValueError('You have to specify either images or text.' )
# Get only text
if images is None and not self.image_processor.is_vqa:
snake_case = self.tokenizer
snake_case = self.tokenizer(
text=lowerCAmelCase , add_special_tokens=lowerCAmelCase , padding=lowerCAmelCase , truncation=lowerCAmelCase , max_length=lowerCAmelCase , stride=lowerCAmelCase , pad_to_multiple_of=lowerCAmelCase , return_attention_mask=lowerCAmelCase , return_overflowing_tokens=lowerCAmelCase , return_special_tokens_mask=lowerCAmelCase , return_offsets_mapping=lowerCAmelCase , return_token_type_ids=lowerCAmelCase , return_length=lowerCAmelCase , verbose=lowerCAmelCase , return_tensors=lowerCAmelCase , **lowerCAmelCase , )
return text_encoding
if not self.image_processor.is_vqa:
# add pixel_values
snake_case = self.image_processor(
lowerCAmelCase , return_tensors=lowerCAmelCase , max_patches=lowerCAmelCase , **lowerCAmelCase )
else:
# add pixel_values and bbox
snake_case = self.image_processor(
lowerCAmelCase , return_tensors=lowerCAmelCase , max_patches=lowerCAmelCase , header_text=lowerCAmelCase , **lowerCAmelCase )
if text is not None and not self.image_processor.is_vqa:
snake_case = self.tokenizer(
text=lowerCAmelCase , add_special_tokens=lowerCAmelCase , padding=lowerCAmelCase , truncation=lowerCAmelCase , max_length=lowerCAmelCase , stride=lowerCAmelCase , pad_to_multiple_of=lowerCAmelCase , return_attention_mask=lowerCAmelCase , return_overflowing_tokens=lowerCAmelCase , return_special_tokens_mask=lowerCAmelCase , return_offsets_mapping=lowerCAmelCase , return_token_type_ids=lowerCAmelCase , return_length=lowerCAmelCase , verbose=lowerCAmelCase , return_tensors=lowerCAmelCase , **lowerCAmelCase , )
if "attention_mask" in text_encoding:
snake_case = text_encoding.pop('attention_mask' )
if "input_ids" in text_encoding:
snake_case = text_encoding.pop('input_ids' )
else:
snake_case = None
if text_encoding is not None:
encoding_image_processor.update(lowerCAmelCase )
return encoding_image_processor
def snake_case ( self , *lowerCAmelCase , **lowerCAmelCase ):
"""simple docstring"""
return self.tokenizer.batch_decode(*lowerCAmelCase , **lowerCAmelCase )
def snake_case ( self , *lowerCAmelCase , **lowerCAmelCase ):
"""simple docstring"""
return self.tokenizer.decode(*lowerCAmelCase , **lowerCAmelCase )
@property
def snake_case ( self ):
"""simple docstring"""
snake_case = self.tokenizer.model_input_names
snake_case = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
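# Added usage sketch (not part of the processor module). The checkpoint id and image
# path below are illustrative assumptions:
#
#     from PIL import Image
#     from transformers import Pix2StructProcessor
#
#     processor = Pix2StructProcessor.from_pretrained("google/pix2struct-base")
#     inputs = processor(images=Image.open("chart.png"), text="What is the title?", return_tensors="pt")
#     # image features come back from the image processor as flattened patches; the
#     # text, when given, becomes decoder_input_ids / decoder_attention_mask as
#     # handled in __call__ above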
| 149 |
import argparse
import math
import traceback

import dateutil.parser as date_parser
import requests


def extract_time_from_single_job(job):
    """Extract start/end/duration info from a single job in a GitHub Actions workflow run."""
    job_info = {}

    start = job["started_at"]
    end = job["completed_at"]

    start_datetime = date_parser.parse(start)
    end_datetime = date_parser.parse(end)

    duration_in_min = round((end_datetime - start_datetime).total_seconds() / 60.0)

    job_info["started_at"] = start
    job_info["completed_at"] = end
    job_info["duration"] = duration_in_min

    return job_info


def get_job_time(workflow_run_id, token=None):
    """Extract time info for all jobs in a GitHub Actions workflow run."""
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}

    url = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100"
    result = requests.get(url, headers=headers).json()
    job_time = {}

    try:
        job_time.update({job["name"]: extract_time_from_single_job(job) for job in result["jobs"]})
        pages_to_iterate_over = math.ceil((result["total_count"] - 100) / 100)

        for i in range(pages_to_iterate_over):
            result = requests.get(url + f"&page={i + 2}", headers=headers).json()
            job_time.update({job["name"]: extract_time_from_single_job(job) for job in result["jobs"]})

        return job_time
    except Exception:
        print(f"Unknown error, could not fetch links:\n{traceback.format_exc()}")

    return {}


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("--workflow_run_id", type=str, required=True, help="A GitHub Actions workflow run id.")
    args = parser.parse_args()

    job_time = get_job_time(args.workflow_run_id)
    job_time = dict(sorted(job_time.items(), key=lambda item: item[1]["duration"], reverse=True))

    for k, v in job_time.items():
        print(f"{k}: {v['duration']}")
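# Added sanity check (not in the original script): extract_time_from_single_job only
# needs the two ISO-8601 timestamps, so it can be exercised without the GitHub API:
#
#     fake_job = {"started_at": "2023-01-01T10:00:00Z", "completed_at": "2023-01-01T10:41:00Z"}
#     extract_time_from_single_job(fake_job)["duration"]  # -> 41 (minutes, rounded)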
| 149 | 1 |
# Copyright 2021 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from packaging import version
from .. import __version__
from .constants import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD
from .doc import (
add_code_sample_docstrings,
add_end_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
copy_func,
replace_return_docstrings,
)
from .generic import (
ContextManagers,
ExplicitEnum,
ModelOutput,
PaddingStrategy,
TensorType,
add_model_info_to_auto_map,
cached_property,
can_return_loss,
expand_dims,
find_labels,
flatten_dict,
infer_framework,
is_jax_tensor,
is_numpy_array,
is_tensor,
is_tf_symbolic_tensor,
is_tf_tensor,
is_torch_device,
is_torch_dtype,
is_torch_tensor,
reshape,
squeeze,
strtobool,
tensor_size,
to_numpy,
to_py_obj,
transpose,
working_or_temp_dir,
)
from .hub import (
CLOUDFRONT_DISTRIB_PREFIX,
DISABLE_TELEMETRY,
HF_MODULES_CACHE,
HUGGINGFACE_CO_PREFIX,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
PYTORCH_PRETRAINED_BERT_CACHE,
PYTORCH_TRANSFORMERS_CACHE,
S3_BUCKET_PREFIX,
TRANSFORMERS_CACHE,
TRANSFORMERS_DYNAMIC_MODULE_NAME,
EntryNotFoundError,
PushToHubMixin,
RepositoryNotFoundError,
RevisionNotFoundError,
cached_file,
default_cache_path,
define_sagemaker_information,
download_url,
extract_commit_hash,
get_cached_models,
get_file_from_repo,
get_full_repo_name,
has_file,
http_user_agent,
is_offline_mode,
is_remote_url,
move_cache,
send_example_telemetry,
try_to_load_from_cache,
)
from .import_utils import (
ENV_VARS_TRUE_AND_AUTO_VALUES,
ENV_VARS_TRUE_VALUES,
TORCH_FX_REQUIRED_VERSION,
USE_JAX,
USE_TF,
USE_TORCH,
DummyObject,
OptionalDependencyNotAvailable,
_LazyModule,
ccl_version,
direct_transformers_import,
get_torch_version,
is_accelerate_available,
is_apex_available,
is_bitsandbytes_available,
    is_bs4_available,
is_coloredlogs_available,
is_cython_available,
is_datasets_available,
is_decord_available,
    is_detectron2_available,
is_faiss_available,
is_flax_available,
is_ftfy_available,
is_in_notebook,
is_ipex_available,
is_jieba_available,
is_jumanpp_available,
is_kenlm_available,
is_keras_nlp_available,
is_librosa_available,
is_natten_available,
is_ninja_available,
is_onnx_available,
is_openai_available,
is_optimum_available,
is_pandas_available,
is_peft_available,
is_phonemizer_available,
is_protobuf_available,
is_psutil_available,
    is_py3nvml_available,
is_pyctcdecode_available,
is_pytesseract_available,
is_pytest_available,
is_pytorch_quantization_available,
is_rjieba_available,
is_sacremoses_available,
is_safetensors_available,
is_sagemaker_dp_enabled,
is_sagemaker_mp_enabled,
is_scipy_available,
is_sentencepiece_available,
is_seqio_available,
is_sklearn_available,
is_soundfile_availble,
is_spacy_available,
is_speech_available,
is_sudachi_available,
is_tensorflow_probability_available,
is_tensorflow_text_available,
    is_tf2onnx_available,
is_tf_available,
is_timm_available,
is_tokenizers_available,
is_torch_available,
    is_torch_bf16_available,
    is_torch_bf16_cpu_available,
    is_torch_bf16_gpu_available,
is_torch_compile_available,
is_torch_cuda_available,
is_torch_fx_available,
is_torch_fx_proxy,
is_torch_mps_available,
is_torch_neuroncore_available,
is_torch_tensorrt_fx_available,
    is_torch_tf32_available,
is_torch_tpu_available,
is_torchaudio_available,
is_torchdistx_available,
is_torchdynamo_available,
is_torchvision_available,
is_training_run_on_sagemaker,
is_vision_available,
requires_backends,
torch_only_method,
)
WEIGHTS_NAME = "pytorch_model.bin"
WEIGHTS_INDEX_NAME = "pytorch_model.bin.index.json"
ADAPTER_CONFIG_NAME = "adapter_config.json"
ADAPTER_WEIGHTS_NAME = "adapter_model.bin"
ADAPTER_SAFE_WEIGHTS_NAME = "adapter_model.safetensors"
TF2_WEIGHTS_NAME = "tf_model.h5"
TF2_WEIGHTS_INDEX_NAME = "tf_model.h5.index.json"
TF_WEIGHTS_NAME = "model.ckpt"
FLAX_WEIGHTS_NAME = "flax_model.msgpack"
FLAX_WEIGHTS_INDEX_NAME = "flax_model.msgpack.index.json"
SAFE_WEIGHTS_NAME = "model.safetensors"
SAFE_WEIGHTS_INDEX_NAME = "model.safetensors.index.json"
CONFIG_NAME = "config.json"
FEATURE_EXTRACTOR_NAME = "preprocessor_config.json"
IMAGE_PROCESSOR_NAME = FEATURE_EXTRACTOR_NAME
GENERATION_CONFIG_NAME = "generation_config.json"
MODEL_CARD_NAME = "modelcard.json"

SENTENCEPIECE_UNDERLINE = "▁"
SPIECE_UNDERLINE = SENTENCEPIECE_UNDERLINE  # Kept for backward compatibility

MULTIPLE_CHOICE_DUMMY_INPUTS = [
    [[0, 1, 0, 1], [1, 0, 0, 1]]
] * 2  # Needs to have 0s and 1s only since XLM uses it for langs too.
DUMMY_INPUTS = [[7, 6, 0, 0, 1], [1, 2, 3, 0, 0], [0, 0, 0, 4, 5]]
DUMMY_MASK = [[1, 1, 1, 1, 1], [1, 1, 1, 0, 0], [0, 0, 0, 1, 1]]
def check_min_version(min_version):
    """Raise ImportError if the installed transformers release is older than `min_version`."""
    if version.parse(__version__) < version.parse(min_version):
        if "dev" in min_version:
            error_message = (
                "This example requires a source install from HuggingFace Transformers (see "
                "`https://huggingface.co/docs/transformers/installation#install-from-source`),"
            )
        else:
            error_message = f"This example requires a minimum version of {min_version},"
        error_message += f" but the version found is {__version__}.\n"
        raise ImportError(
            error_message
            + "Check out https://github.com/huggingface/transformers/tree/main/examples#important-note for the examples corresponding to other "
            "versions of HuggingFace Transformers."
        )
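# Added usage note (not in the original module): example scripts are expected to call
# this guard at import time, e.g. `check_min_version("4.21.0.dev0")` (the version
# string is illustrative), which raises ImportError when the installed release is
# older than the examples require.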
| 142 |
import os
import textwrap

import pyarrow as pa
import pytest

from datasets import ClassLabel, Features, Image
from datasets.packaged_modules.csv.csv import Csv

from ..utils import require_pil


@pytest.fixture
def csv_file(tmp_path):
    filename = tmp_path / "file.csv"
    data = textwrap.dedent(
        """\
        header1,header2
        1,2
        10,20
        """
    )
    with open(filename, "w") as f:
        f.write(data)
    return str(filename)


@pytest.fixture
def malformed_csv_file(tmp_path):
    filename = tmp_path / "malformed_file.csv"
    data = textwrap.dedent(
        """\
        header1,header2
        1,2
        10,20,
        """
    )
    with open(filename, "w") as f:
        f.write(data)
    return str(filename)


@pytest.fixture
def csv_file_with_image(tmp_path, image_file):
    filename = tmp_path / "csv_with_image.csv"
    data = textwrap.dedent(
        f"""\
        image
        {image_file}
        """
    )
    with open(filename, "w") as f:
        f.write(data)
    return str(filename)


@pytest.fixture
def csv_file_with_label(tmp_path):
    filename = tmp_path / "csv_with_label.csv"
    data = textwrap.dedent(
        """\
        label
        good
        bad
        good
        """
    )
    with open(filename, "w") as f:
        f.write(data)
    return str(filename)


@pytest.fixture
def csv_file_with_int_list(tmp_path):
    filename = tmp_path / "csv_with_int_list.csv"
    data = textwrap.dedent(
        """\
        int_list
        1 2 3
        4 5 6
        7 8 9
        """
    )
    with open(filename, "w") as f:
        f.write(data)
    return str(filename)


def test_csv_generate_tables_raises_error_with_malformed_csv(csv_file, malformed_csv_file, caplog):
    csv = Csv()
    generator = csv._generate_tables([[csv_file, malformed_csv_file]])
    with pytest.raises(ValueError, match="Error tokenizing data"):
        for _ in generator:
            pass
    assert any(
        record.levelname == "ERROR"
        and "Failed to read file" in record.message
        and os.path.basename(malformed_csv_file) in record.message
        for record in caplog.records
    )


@require_pil
def test_csv_cast_image(csv_file_with_image):
    with open(csv_file_with_image, encoding="utf-8") as f:
        image_file = f.read().splitlines()[1]
    csv = Csv(encoding="utf-8", features=Features({"image": Image()}))
    generator = csv._generate_tables([[csv_file_with_image]])
    pa_table = pa.concat_tables([table for _, table in generator])
    assert pa_table.schema.field("image").type == Image()()
    generated_content = pa_table.to_pydict()["image"]
    assert generated_content == [{"path": image_file, "bytes": None}]


def test_csv_cast_label(csv_file_with_label):
    with open(csv_file_with_label, encoding="utf-8") as f:
        labels = f.read().splitlines()[1:]
    csv = Csv(encoding="utf-8", features=Features({"label": ClassLabel(names=["good", "bad"])}))
    generator = csv._generate_tables([[csv_file_with_label]])
    pa_table = pa.concat_tables([table for _, table in generator])
    assert pa_table.schema.field("label").type == ClassLabel(names=["good", "bad"])()
    generated_content = pa_table.to_pydict()["label"]
    assert generated_content == [ClassLabel(names=["good", "bad"]).str2int(label) for label in labels]


def test_csv_convert_int_list(csv_file_with_int_list):
    csv = Csv(encoding="utf-8", sep=",", converters={"int_list": lambda x: [int(i) for i in x.split()]})
    generator = csv._generate_tables([[csv_file_with_int_list]])
    pa_table = pa.concat_tables([table for _, table in generator])
    assert pa.types.is_list(pa_table.schema.field("int_list").type)
    generated_content = pa_table.to_pydict()["int_list"]
    assert generated_content == [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
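# Added sketch (not part of the test module): the `converters` hook exercised by the
# last test is essentially plain pandas.read_csv plumbing, so the same parsing can be
# checked standalone (the file name is an assumption):
#
#     import pandas as pd
#     df = pd.read_csv("csv_with_int_list.csv", converters={"int_list": lambda x: [int(i) for i in x.split()]})
#     df["int_list"].tolist()  # -> [[1, 2, 3], [4, 5, 6], [7, 8, 9]]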
| 142 | 1 |
from ...utils import logging
from ..t5.modeling_tf_t5 import TFT5EncoderModel, TFT5ForConditionalGeneration, TFT5Model
from .configuration_mt5 import MT5Config


logger = logging.get_logger(__name__)

_CONFIG_FOR_DOC = "T5Config"


class TFMT5Model(TFT5Model):
    model_type = "mt5"
    config_class = MT5Config


class TFMT5ForConditionalGeneration(TFT5ForConditionalGeneration):
    model_type = "mt5"
    config_class = MT5Config


class TFMT5EncoderModel(TFT5EncoderModel):
    model_type = "mt5"
    config_class = MT5Config
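# Added usage sketch (not in this module); "google/mt5-small" is a real hub checkpoint
# id but is an assumption here, not something this file references:
#
#     from transformers import AutoTokenizer, TFMT5ForConditionalGeneration
#
#     tokenizer = AutoTokenizer.from_pretrained("google/mt5-small")
#     model = TFMT5ForConditionalGeneration.from_pretrained("google/mt5-small")
#     batch = tokenizer("translate English to German: Hello", return_tensors="tf")
#     output_ids = model.generate(**batch)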
| 369 |
"""simple docstring"""
import sys
from typing import Tuple
import numpy as np
import torch
from PIL import Image
from torch import nn
from transformers.image_utils import PILImageResampling
from utils import img_tensorize
class lowerCAmelCase__ :
def __init__( self : Optional[int] , snake_case__ : List[Any] , snake_case__ : str=sys.maxsize ):
'''simple docstring'''
UpperCAmelCase__ : Any = "bilinear"
UpperCAmelCase__ : Any = max_size
UpperCAmelCase__ : Any = short_edge_length
def __call__( self : Dict , snake_case__ : Optional[Any] ):
'''simple docstring'''
UpperCAmelCase__ : Tuple = []
for img in imgs:
UpperCAmelCase__ , UpperCAmelCase__ : int = img.shape[:2]
# later: provide list and randomly choose index for resize
UpperCAmelCase__ : Dict = np.random.randint(self.short_edge_length[0] , self.short_edge_length[1] + 1 )
if size == 0:
return img
UpperCAmelCase__ : Dict = size * 1.0 / min(snake_case__ , snake_case__ )
if h < w:
UpperCAmelCase__ , UpperCAmelCase__ : Optional[int] = size, scale * w
else:
UpperCAmelCase__ , UpperCAmelCase__ : int = scale * h, size
if max(snake_case__ , snake_case__ ) > self.max_size:
UpperCAmelCase__ : Union[str, Any] = self.max_size * 1.0 / max(snake_case__ , snake_case__ )
UpperCAmelCase__ : List[str] = newh * scale
UpperCAmelCase__ : int = neww * scale
UpperCAmelCase__ : List[Any] = int(neww + 0.5 )
UpperCAmelCase__ : Optional[Any] = int(newh + 0.5 )
if img.dtype == np.uinta:
UpperCAmelCase__ : Any = Image.fromarray(snake_case__ )
UpperCAmelCase__ : Union[str, Any] = pil_image.resize((neww, newh) , PILImageResampling.BILINEAR )
UpperCAmelCase__ : Optional[int] = np.asarray(snake_case__ )
else:
UpperCAmelCase__ : Any = img.permute(2 , 0 , 1 ).unsqueeze(0 ) # 3, 0, 1) # hw(c) -> nchw
UpperCAmelCase__ : Tuple = nn.functional.interpolate(
snake_case__ , (newh, neww) , mode=self.interp_method , align_corners=snake_case__ ).squeeze(0 )
img_augs.append(snake_case__ )
return img_augs
class lowerCAmelCase__ :
def __init__( self : Optional[int] , snake_case__ : Dict ):
'''simple docstring'''
UpperCAmelCase__ : Dict = ResizeShortestEdge([cfg.INPUT.MIN_SIZE_TEST, cfg.INPUT.MIN_SIZE_TEST] , cfg.INPUT.MAX_SIZE_TEST )
UpperCAmelCase__ : Any = cfg.INPUT.FORMAT
UpperCAmelCase__ : Optional[Any] = cfg.SIZE_DIVISIBILITY
UpperCAmelCase__ : str = cfg.PAD_VALUE
UpperCAmelCase__ : List[Any] = cfg.INPUT.MAX_SIZE_TEST
UpperCAmelCase__ : Dict = cfg.MODEL.DEVICE
UpperCAmelCase__ : Optional[int] = torch.tensor(cfg.MODEL.PIXEL_STD ).to(self.device ).view(len(cfg.MODEL.PIXEL_STD ) , 1 , 1 )
UpperCAmelCase__ : str = torch.tensor(cfg.MODEL.PIXEL_MEAN ).to(self.device ).view(len(cfg.MODEL.PIXEL_STD ) , 1 , 1 )
UpperCAmelCase__ : List[str] = lambda snake_case__ : (x - self.pixel_mean) / self.pixel_std
def __a ( self : Optional[int] , snake_case__ : Dict ):
'''simple docstring'''
UpperCAmelCase__ : Union[str, Any] = tuple(max(snake_case__ ) for s in zip(*[img.shape for img in images] ) )
UpperCAmelCase__ : Tuple = [im.shape[-2:] for im in images]
UpperCAmelCase__ : int = [
nn.functional.pad(
snake_case__ , [0, max_size[-1] - size[1], 0, max_size[-2] - size[0]] , value=self.pad_value , )
for size, im in zip(snake_case__ , snake_case__ )
]
return torch.stack(snake_case__ ), torch.tensor(snake_case__ )
def __call__( self : str , snake_case__ : int , snake_case__ : int=False ):
'''simple docstring'''
with torch.no_grad():
if not isinstance(snake_case__ , snake_case__ ):
UpperCAmelCase__ : Dict = [images]
if single_image:
assert len(snake_case__ ) == 1
for i in range(len(snake_case__ ) ):
if isinstance(images[i] , torch.Tensor ):
images.insert(snake_case__ , images.pop(snake_case__ ).to(self.device ).float() )
elif not isinstance(images[i] , torch.Tensor ):
images.insert(
snake_case__ , torch.as_tensor(img_tensorize(images.pop(snake_case__ ) , input_format=self.input_format ) )
.to(self.device )
.float() , )
# resize smallest edge
UpperCAmelCase__ : Optional[Any] = torch.tensor([im.shape[:2] for im in images] )
UpperCAmelCase__ : Tuple = self.aug(snake_case__ )
# transpose images and convert to torch tensors
# images = [torch.as_tensor(i.astype("float32")).permute(2, 0, 1).to(self.device) for i in images]
# now normalize before pad to avoid useless arithmetic
UpperCAmelCase__ : Optional[int] = [self.normalizer(snake_case__ ) for x in images]
# now pad them to do the following operations
UpperCAmelCase__ , UpperCAmelCase__ : Optional[Any] = self.pad(snake_case__ )
# Normalize
if self.size_divisibility > 0:
raise NotImplementedError()
# pad
UpperCAmelCase__ : Tuple = torch.true_divide(snake_case__ , snake_case__ )
if single_image:
return images[0], sizes[0], scales_yx[0]
else:
return images, sizes, scales_yx
def SCREAMING_SNAKE_CASE__ ( snake_case : Optional[Any] , snake_case : str )-> List[Any]:
'''simple docstring'''
boxes[:, 0::2] *= scale_yx[:, 1]
boxes[:, 1::2] *= scale_yx[:, 0]
return boxes
def SCREAMING_SNAKE_CASE__ ( snake_case : List[Any] , snake_case : Tuple[int, int] )-> int:
'''simple docstring'''
assert torch.isfinite(snake_case ).all(), "Box tensor contains infinite or NaN!"
UpperCAmelCase__ , UpperCAmelCase__ : Dict = box_size
tensor[:, 0].clamp_(min=0 , max=snake_case )
tensor[:, 1].clamp_(min=0 , max=snake_case )
tensor[:, 2].clamp_(min=0 , max=snake_case )
tensor[:, 3].clamp_(min=0 , max=snake_case )
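# Added smoke test (not in the original module) for the two box helpers above,
# using dummy (x1, y1, x2, y2) boxes:
if __name__ == "__main__":
    demo_boxes = torch.tensor([[10.0, 20.0, 700.0, 500.0]])
    demo_scales = torch.tensor([[0.5, 0.5]])  # per-image (scale_y, scale_x)
    _scale_box(demo_boxes, demo_scales)
    _clip_box(demo_boxes, (224, 224))  # box_size is (height, width)
    print(demo_boxes)  # tensor([[  5.,  10., 224., 224.]])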
| 298 | 0 |
from dataclasses import dataclass, field
from typing import Optional

from transformers import AutoConfig, AutoImageProcessor, AutoTokenizer, FlaxVisionEncoderDecoderModel, HfArgumentParser


@dataclass
class ModelArguments:
    output_dir: str = field(
        metadata={"help": "The output directory where the model will be written."},
    )
    encoder_model_name_or_path: str = field(
        metadata={
            "help": (
                "The encoder model checkpoint for weights initialization. "
                "Don't set if you want to train an encoder model from scratch."
            )
        },
    )
    decoder_model_name_or_path: str = field(
        metadata={
            "help": (
                "The decoder model checkpoint for weights initialization. "
                "Don't set if you want to train a decoder model from scratch."
            )
        },
    )
    encoder_config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained encoder config name or path if not the same as encoder_model_name"}
    )
    decoder_config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained decoder config name or path if not the same as decoder_model_name"}
    )


def main():
    parser = HfArgumentParser((ModelArguments,))
    (model_args,) = parser.parse_args_into_dataclasses()

    # Load pretrained model and tokenizer

    # Use explicit specified encoder config
    if model_args.encoder_config_name:
        encoder_config = AutoConfig.from_pretrained(model_args.encoder_config_name)
    # Use pretrained encoder model's config
    else:
        encoder_config = AutoConfig.from_pretrained(model_args.encoder_model_name_or_path)

    # Use explicit specified decoder config
    if model_args.decoder_config_name:
        decoder_config = AutoConfig.from_pretrained(model_args.decoder_config_name)
    # Use pretrained decoder model's config
    else:
        decoder_config = AutoConfig.from_pretrained(model_args.decoder_model_name_or_path)

    # necessary for `from_encoder_decoder_pretrained` when `decoder_config` is passed
    decoder_config.is_decoder = True
    decoder_config.add_cross_attention = True

    model = FlaxVisionEncoderDecoderModel.from_encoder_decoder_pretrained(
        encoder_pretrained_model_name_or_path=model_args.encoder_model_name_or_path, decoder_pretrained_model_name_or_path=model_args.decoder_model_name_or_path, encoder_config=encoder_config, decoder_config=decoder_config,
    )

    # GPT2 only has bos/eos tokens but not decoder_start/pad tokens
    decoder_start_token_id = decoder_config.decoder_start_token_id
    pad_token_id = decoder_config.pad_token_id
    if decoder_start_token_id is None:
        decoder_start_token_id = decoder_config.bos_token_id
    if pad_token_id is None:
        pad_token_id = decoder_config.eos_token_id

    # This is necessary to make Flax's generate() work
    model.config.eos_token_id = decoder_config.eos_token_id
    model.config.decoder_start_token_id = decoder_start_token_id
    model.config.pad_token_id = pad_token_id

    image_processor = AutoImageProcessor.from_pretrained(model_args.encoder_model_name_or_path)

    tokenizer = AutoTokenizer.from_pretrained(model_args.decoder_model_name_or_path)
    tokenizer.pad_token = tokenizer.convert_ids_to_tokens(model.config.pad_token_id)

    model.save_pretrained(model_args.output_dir)
    image_processor.save_pretrained(model_args.output_dir)
    tokenizer.save_pretrained(model_args.output_dir)


if __name__ == "__main__":
    main()
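# Added usage note (not in the original script); the script filename and model ids
# below are illustrative assumptions:
#
#   python create_model_from_encoder_decoder_models.py \
#       --output_dir ./vit-gpt2 \
#       --encoder_model_name_or_path google/vit-base-patch16-224-in21k \
#       --decoder_model_name_or_path gpt2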
| 19 |
"""Convert a CLAP checkpoint from the original repository to the Hugging Face format."""

import argparse
import re

import torch
from CLAP import create_model

from transformers import AutoFeatureExtractor, ClapConfig, ClapModel


KEYS_TO_MODIFY_MAPPING = {
    "text_branch": "text_model",
    "audio_branch": "audio_model.audio_encoder",
    "attn": "attention.self",
    "self.proj": "output.dense",
    "attention.self_mask": "attn_mask",
    "mlp.fc1": "intermediate.dense",
    "mlp.fc2": "output.dense",
    "norm1": "layernorm_before",
    "norm2": "layernorm_after",
    "bn0": "batch_norm",
}

processor = AutoFeatureExtractor.from_pretrained("laion/clap-htsat-unfused", truncation="rand_trunc")


def init_clap(checkpoint_path, enable_fusion=False):
    model, model_cfg = create_model(
        "HTSAT-tiny",
        "roberta",
        checkpoint_path,
        precision="fp32",
        device="cuda:0" if torch.cuda.is_available() else "cpu",
        enable_fusion=enable_fusion,
        fusion_type="aff_2d" if enable_fusion else None,
    )
    return model, model_cfg


def rename_state_dict(state_dict):
    model_state_dict = {}

    sequential_layers_pattern = r".*sequential.(\d+).*"
    text_projection_pattern = r".*_projection.(\d+).*"

    for key, value in state_dict.items():
        # check if any key needs to be modified
        for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items():
            if key_to_modify in key:
                key = key.replace(key_to_modify, new_key)

        if re.match(sequential_layers_pattern, key):
            # replace sequential layers with list
            sequential_layer = re.match(sequential_layers_pattern, key).group(1)

            key = key.replace(f"sequential.{sequential_layer}.", f"layers.{int(sequential_layer)//3}.linear.")
        elif re.match(text_projection_pattern, key):
            projecton_layer = int(re.match(text_projection_pattern, key).group(1))

            # Because in CLAP they use `nn.Sequential`...
            transformers_projection_layer = 1 if projecton_layer == 0 else 2

            key = key.replace(f"_projection.{projecton_layer}.", f"_projection.linear{transformers_projection_layer}.")

        if "audio" and "qkv" in key:
            # NOTE: "audio" is always truthy here, so this effectively checks only for "qkv"
            # split qkv into query key and value
            mixed_qkv = value
            qkv_dim = mixed_qkv.size(0) // 3

            query_layer = mixed_qkv[:qkv_dim]
            key_layer = mixed_qkv[qkv_dim : qkv_dim * 2]
            value_layer = mixed_qkv[qkv_dim * 2 :]

            model_state_dict[key.replace("qkv", "query")] = query_layer
            model_state_dict[key.replace("qkv", "key")] = key_layer
            model_state_dict[key.replace("qkv", "value")] = value_layer
        else:
            model_state_dict[key] = value

    return model_state_dict


def convert_clap_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path, enable_fusion=False):
    clap_model, clap_model_cfg = init_clap(checkpoint_path, enable_fusion=enable_fusion)

    clap_model.eval()
    state_dict = clap_model.state_dict()
    state_dict = rename_state_dict(state_dict)

    transformers_config = ClapConfig()
    transformers_config.audio_config.enable_fusion = enable_fusion
    model = ClapModel(transformers_config)

    # ignore the spectrogram embedding layer
    model.load_state_dict(state_dict, strict=False)

    model.save_pretrained(pytorch_dump_folder_path)
    transformers_config.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    parser.add_argument("--enable_fusion", action="store_true", help="Whether to enable fusion or not")
    args = parser.parse_args()

    convert_clap_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.enable_fusion)
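# Added usage note (not in the original script); the filename and paths are placeholders:
#
#   python convert_clap_original_pytorch_to_hf.py \
#       --checkpoint_path ./HTSAT-tiny-checkpoint.pt \
#       --pytorch_dump_folder_path ./clap-hf \
#       --enable_fusion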
| 234 | 0 |
from typing import Any


class Node:
    def __init__(self, data: Any) -> None:
        self.data = data
        self.next = None


class LinkedList:
    def __init__(self) -> None:
        self.head = None

    def print_list(self) -> None:
        temp = self.head
        while temp is not None:
            print(temp.data, end=" ")
            temp = temp.next
        print()

    def push(self, new_data: Any) -> None:
        new_node = Node(new_data)
        new_node.next = self.head
        self.head = new_node

    def swap_nodes(self, node_data_1: Any, node_data_2: Any) -> None:
        if node_data_1 == node_data_2:
            return
        node_1 = self.head
        while node_1 is not None and node_1.data != node_data_1:
            node_1 = node_1.next
        node_2 = self.head
        while node_2 is not None and node_2.data != node_data_2:
            node_2 = node_2.next
        if node_1 is None or node_2 is None:
            return
        # swap the payloads; the node links stay untouched
        node_1.data, node_2.data = node_2.data, node_1.data


if __name__ == "__main__":
    ll = LinkedList()
    for i in range(5, 0, -1):
        ll.push(i)
    ll.print_list()
    ll.swap_nodes(1, 4)
    print("After swapping")
    ll.print_list()
| 81 |
def naive_pattern_search(s: str, pattern: str) -> list:
    """Return all start indices at which `pattern` occurs in `s` (O(len(s) * len(pattern)))."""
    pat_len = len(pattern)
    position = []
    for i in range(len(s) - pat_len + 1):
        match_found = True
        for j in range(pat_len):
            if s[i + j] != pattern[j]:
                match_found = False
                break
        if match_found:
            position.append(i)
    return position


if __name__ == "__main__":
    assert naive_pattern_search("ABCDEFG", "DE") == [3]
    print(naive_pattern_search("ABAAABCDBBABCDDEBCABC", "ABC"))
| 81 | 1 |
"""simple docstring"""
from __future__ import annotations
from decimal import Decimal
from math import * # noqa: F403
from sympy import diff
def snake_case_ ( A_ : str, A_ : float | Decimal, A_ : float = 10**-10 ):
'''simple docstring'''
_lowerCamelCase : Optional[Any] = a
while True:
_lowerCamelCase : Union[str, Any] = Decimal(A_ ) - (
Decimal(eval(A_ ) ) / Decimal(eval(str(diff(A_ ) ) ) ) # noqa: S307
)
# This number dictates the accuracy of the answer
if abs(eval(A_ ) ) < precision: # noqa: S307
return float(A_ )
# Let's Execute
if __name__ == "__main__":
# Find root of trigonometric function
# Find value of pi
print(F"""The root of sin(x) = 0 is {newton_raphson("sin(x)", 2)}""")
# Find root of polynomial
print(F"""The root of x**2 - 5*x + 2 = 0 is {newton_raphson("x**2 - 5*x + 2", 0.4)}""")
# Find Square Root of 5
print(F"""The root of log(x) - 1 = 0 is {newton_raphson("log(x) - 1", 2)}""")
# Exponential Roots
print(F"""The root of exp(x) - 1 = 0 is {newton_raphson("exp(x) - 1", 0)}""")
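# Added worked step (not in the original file): each iteration applies
#     x_{n+1} = x_n - f(x_n) / f'(x_n)
# e.g. for f(x) = x**2 - 5*x + 2 starting at x_0 = 0.4:
#     f(0.4) = 0.16,  f'(x) = 2*x - 5,  f'(0.4) = -4.2
#     x_1 = 0.4 - 0.16 / (-4.2) ≈ 0.4381, already close to the root (5 - sqrt(17)) / 2 ≈ 0.4384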
| 72 |
from collections import defaultdict
from math import gcd


def solution(limit: int = 1_500_000) -> int:
    """Count wire lengths L <= limit that form exactly one integer-sided right triangle."""
    frequencies = defaultdict(int)
    euclid_m = 2
    while 2 * euclid_m * (euclid_m + 1) <= limit:
        for euclid_n in range((euclid_m % 2) + 1, euclid_m, 2):
            if gcd(euclid_m, euclid_n) > 1:
                continue
            primitive_perimeter = 2 * euclid_m * (euclid_m + euclid_n)
            for perimeter in range(primitive_perimeter, limit + 1, primitive_perimeter):
                frequencies[perimeter] += 1
        euclid_m += 1
    return sum(1 for frequency in frequencies.values() if frequency == 1)


if __name__ == "__main__":
    print(f"{solution() = }")
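# Added worked example (not in the original file): Euclid's formula generates a
# primitive triple (m**2 - n**2, 2*m*n, m**2 + n**2) for coprime m > n of opposite
# parity, whose perimeter collapses to 2*m*(m + n). For m=2, n=1 that is the (3, 4, 5)
# triangle with perimeter 2*2*3 = 12, and the inner loop then tallies every multiple
# of 12 up to the limit.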
| 340 | 0 |
"""simple docstring"""
import argparse
import torch
from transformers import FunnelBaseModel, FunnelConfig, FunnelModel, load_tf_weights_in_funnel
from transformers.utils import logging
logging.set_verbosity_info()
def _snake_case ( lowercase__ , lowercase__ , lowercase__ , lowercase__ ):
# Initialise PyTorch model
_lowerCamelCase : Any = FunnelConfig.from_json_file(lowercase__ )
print(f'''Building PyTorch model from configuration: {config}''' )
_lowerCamelCase : Optional[int] = FunnelBaseModel(lowercase__ ) if base_model else FunnelModel(lowercase__ )
# Load weights from tf checkpoint
load_tf_weights_in_funnel(lowercase__ , lowercase__ , lowercase__ )
# Save pytorch-model
print(f'''Save PyTorch model to {pytorch_dump_path}''' )
torch.save(model.state_dict() , lowercase__ )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--config_file""",
default=None,
type=str,
required=True,
help="""The config json file corresponding to the pre-trained model. \nThis specifies the model architecture.""",
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--base_model""", action="""store_true""", help="""Whether you want just the base model (no decoder) or not."""
)
    args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path, args.base_model
    )
| 12 |
"""simple docstring"""
import math
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase__ = logging.get_logger(__name__)
lowercase__ = {
"""facebook/data2vec-base-960h""": """https://huggingface.co/facebook/data2vec-audio-base-960h/resolve/main/config.json""",
# See all Data2VecAudio models at https://huggingface.co/models?filter=data2vec-audio
}
class lowerCAmelCase__ ( lowercase ):
'''simple docstring'''
lowerCamelCase__ = """data2vec-audio"""
def __init__( self , lowercase=32 , lowercase=768 , lowercase=12 , lowercase=12 , lowercase=3072 , lowercase="gelu" , lowercase=0.1 , lowercase=0.1 , lowercase=0.1 , lowercase=0.0 , lowercase=0.1 , lowercase=0.1 , lowercase=0.02 , lowercase=1E-5 , lowercase="gelu" , lowercase=(512, 512, 512, 512, 512, 512, 512) , lowercase=(5, 2, 2, 2, 2, 2, 2) , lowercase=(10, 3, 3, 3, 3, 2, 2) , lowercase=False , lowercase=16 , lowercase=19 , lowercase=5 , lowercase=0.05 , lowercase=10 , lowercase=2 , lowercase=0.0 , lowercase=10 , lowercase=0 , lowercase="sum" , lowercase=False , lowercase=False , lowercase=256 , lowercase=(512, 512, 512, 512, 1500) , lowercase=(5, 3, 3, 1, 1) , lowercase=(1, 2, 3, 1, 1) , lowercase=512 , lowercase=0 , lowercase=1 , lowercase=2 , lowercase=False , lowercase=3 , lowercase=2 , lowercase=3 , lowercase=None , **lowercase , ):
super().__init__(**lowercase , pad_token_id=lowercase , bos_token_id=lowercase , eos_token_id=lowercase )
_lowerCamelCase : str = hidden_size
_lowerCamelCase : str = feat_extract_activation
_lowerCamelCase : Optional[Any] = list(lowercase )
_lowerCamelCase : Dict = list(lowercase )
_lowerCamelCase : Dict = list(lowercase )
_lowerCamelCase : Optional[Any] = conv_bias
_lowerCamelCase : Union[str, Any] = num_conv_pos_embeddings
_lowerCamelCase : List[Any] = num_conv_pos_embedding_groups
_lowerCamelCase : List[Any] = conv_pos_kernel_size
_lowerCamelCase : Optional[int] = len(self.conv_dim )
_lowerCamelCase : List[str] = num_hidden_layers
_lowerCamelCase : Any = intermediate_size
_lowerCamelCase : List[str] = hidden_act
_lowerCamelCase : Tuple = num_attention_heads
_lowerCamelCase : Any = hidden_dropout
_lowerCamelCase : Union[str, Any] = attention_dropout
_lowerCamelCase : str = activation_dropout
_lowerCamelCase : Any = feat_proj_dropout
_lowerCamelCase : Tuple = final_dropout
_lowerCamelCase : Union[str, Any] = layerdrop
_lowerCamelCase : List[Any] = layer_norm_eps
_lowerCamelCase : Optional[Any] = initializer_range
_lowerCamelCase : Optional[int] = vocab_size
_lowerCamelCase : Tuple = use_weighted_layer_sum
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
'Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =='
' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ='
F''' {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,'''
F''' `len(config.conv_kernel) = {len(self.conv_kernel )}`.''' )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
_lowerCamelCase : Optional[Any] = mask_time_prob
_lowerCamelCase : List[Any] = mask_time_length
_lowerCamelCase : List[Any] = mask_time_min_masks
_lowerCamelCase : Tuple = mask_feature_prob
_lowerCamelCase : Optional[Any] = mask_feature_length
_lowerCamelCase : Dict = mask_feature_min_masks
# ctc loss
_lowerCamelCase : Tuple = ctc_loss_reduction
_lowerCamelCase : str = ctc_zero_infinity
# adapter
_lowerCamelCase : Union[str, Any] = add_adapter
_lowerCamelCase : List[Any] = adapter_kernel_size
_lowerCamelCase : Optional[Any] = adapter_stride
_lowerCamelCase : List[Any] = num_adapter_layers
_lowerCamelCase : int = output_hidden_size or hidden_size
# SequenceClassification-specific parameter. Feel free to ignore for other classes.
_lowerCamelCase : Optional[int] = classifier_proj_size
# XVector-specific parameters. Feel free to ignore for other classes.
_lowerCamelCase : List[str] = list(lowercase )
_lowerCamelCase : Optional[Any] = list(lowercase )
_lowerCamelCase : Any = list(lowercase )
_lowerCamelCase : Optional[Any] = xvector_output_dim
@property
def A_ ( self ):
return math.prod(self.conv_stride ) | 12 | 1 |
def greatest_common_divisor(x: int, y: int) -> int:
    """Euclid's algorithm: gcd(x, y) == gcd(y, x mod y)."""
    return x if y == 0 else greatest_common_divisor(y, x % y)


def lcm(x: int, y: int) -> int:
    """Least common multiple via the identity lcm(x, y) * gcd(x, y) == x * y."""
    return (x * y) // greatest_common_divisor(x, y)


def solution(n: int = 20) -> int:
    """Smallest positive number evenly divisible by all of 1..n (Project Euler 5)."""
    g = 1
    for i in range(1, n + 1):
        g = lcm(g, i)
    return g


if __name__ == "__main__":
    print(f"{solution() = }")
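# Added worked example (not in the original file): folding lcm over 1..10 gives
#     2, 6, 12, 60, 60, 420, 840, 2520, 2520
# so solution(10) == 2520, the check value stated in Project Euler problem 5.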
| 149 |
from typing import Optional

import numpy as np
import torch
from torch import nn

from transformers import GPT2Config, GPT2LMHeadModel
from transformers.modeling_utils import ModuleUtilsMixin

from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin


class UniDiffuserTextDecoder(ModelMixin, ConfigMixin, ModuleUtilsMixin):
    """GPT-2 text decoder with an optional linear bottleneck for the prefix embedding."""

    _keys_to_ignore_on_load_unexpected = [r"h\.\d+\.attn\.bias", r"h\.\d+\.attn\.masked_bias"]

    @register_to_config
    def __init__(
        self,
        prefix_length: int,
        prefix_inner_dim: int,
        prefix_hidden_dim: Optional[int] = None,
        vocab_size: int = 50257,
        n_positions: int = 1024,
        n_embd: int = 768,
        n_layer: int = 12,
        n_head: int = 12,
        n_inner: Optional[int] = None,
        activation_function: str = "gelu_new",
        resid_pdrop: float = 0.1,
        embd_pdrop: float = 0.1,
        attn_pdrop: float = 0.1,
        layer_norm_epsilon: float = 1e-5,
        initializer_range: float = 0.02,
        scale_attn_weights: bool = True,
        use_cache: bool = True,
        scale_attn_by_inverse_layer_idx: bool = False,
        reorder_and_upcast_attn: bool = False,
    ):
        super().__init__()

        self.prefix_length = prefix_length

        if prefix_inner_dim != n_embd and prefix_hidden_dim is None:
            raise ValueError(
                f"`prefix_hidden_dim` cannot be `None` when `prefix_inner_dim`: {prefix_inner_dim} and"
                f" `n_embd`: {n_embd} are not equal."
            )

        self.prefix_inner_dim = prefix_inner_dim
        self.prefix_hidden_dim = prefix_hidden_dim

        self.encode_prefix = (
            nn.Linear(self.prefix_inner_dim, self.prefix_hidden_dim)
            if self.prefix_hidden_dim is not None
            else nn.Identity()
        )
        self.decode_prefix = (
            nn.Linear(self.prefix_hidden_dim, n_embd) if self.prefix_hidden_dim is not None else nn.Identity()
        )

        gpt_config = GPT2Config(
            vocab_size=vocab_size, n_positions=n_positions, n_embd=n_embd, n_layer=n_layer, n_head=n_head, n_inner=n_inner, activation_function=activation_function, resid_pdrop=resid_pdrop, embd_pdrop=embd_pdrop, attn_pdrop=attn_pdrop, layer_norm_epsilon=layer_norm_epsilon, initializer_range=initializer_range, scale_attn_weights=scale_attn_weights, use_cache=use_cache, scale_attn_by_inverse_layer_idx=scale_attn_by_inverse_layer_idx, reorder_and_upcast_attn=reorder_and_upcast_attn,
        )
        self.transformer = GPT2LMHeadModel(gpt_config)

    def forward(
        self,
        input_ids: torch.Tensor,
        prefix_embeds: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
        labels: Optional[torch.Tensor] = None,
    ):
        embedding_text = self.transformer.transformer.wte(input_ids)
        hidden = self.encode_prefix(prefix_embeds)
        prefix_embeds = self.decode_prefix(hidden)
        embedding_cat = torch.cat((prefix_embeds, embedding_text), dim=1)

        if labels is not None:
            dummy_token = self.get_dummy_token(input_ids.shape[0], input_ids.device)
            labels = torch.cat((dummy_token, input_ids), dim=1)
        out = self.transformer(inputs_embeds=embedding_cat, labels=labels, attention_mask=attention_mask)
        if self.prefix_hidden_dim is not None:
            return out, hidden
        else:
            return out

    def get_dummy_token(self, batch_size: int, device: torch.device) -> torch.Tensor:
        return torch.zeros(batch_size, self.prefix_length, dtype=torch.int64, device=device)

    def encode(self, prefix):
        return self.encode_prefix(prefix)

    @torch.no_grad()
    def generate_captions(self, features, eos_token_id, device):
        features = torch.split(features, 1, dim=0)
        generated_tokens = []
        generated_seq_lengths = []
        for feature in features:
            feature = self.decode_prefix(feature.to(device))  # back to the clip feature
            # Only support beam search for now
            output_tokens, seq_lengths = self.generate_beam(
                input_embeds=feature, device=device, eos_token_id=eos_token_id
            )
            generated_tokens.append(output_tokens[0])
            generated_seq_lengths.append(seq_lengths[0])
        generated_tokens = torch.stack(generated_tokens)
        generated_seq_lengths = torch.stack(generated_seq_lengths)
        return generated_tokens, generated_seq_lengths

    @torch.no_grad()
    def generate_beam(
        self,
        input_ids=None,
        input_embeds=None,
        device=None,
        beam_size: int = 5,
        entry_length: int = 67,
        temperature: float = 1.0,
        eos_token_id: Optional[int] = None,
    ):
        stop_token_index = eos_token_id
        tokens = None
        scores = None
        seq_lengths = torch.ones(beam_size, device=device, dtype=torch.int)
        is_stopped = torch.zeros(beam_size, device=device, dtype=torch.bool)

        if input_embeds is not None:
            generated = input_embeds
        else:
            generated = self.transformer.transformer.wte(input_ids)

        for i in range(entry_length):
            outputs = self.transformer(inputs_embeds=generated)
            logits = outputs.logits
            logits = logits[:, -1, :] / (temperature if temperature > 0 else 1.0)
            logits = logits.softmax(-1).log()

            if scores is None:
                scores, next_tokens = logits.topk(beam_size, -1)
                generated = generated.expand(beam_size, *generated.shape[1:])
                next_tokens, scores = next_tokens.permute(1, 0), scores.squeeze(0)
                if tokens is None:
                    tokens = next_tokens
                else:
                    tokens = tokens.expand(beam_size, *tokens.shape[1:])
                    tokens = torch.cat((tokens, next_tokens), dim=1)
            else:
                # finished beams contribute no further log-probability mass
                logits[is_stopped] = -float(np.inf)
                logits[is_stopped, 0] = 0
                scores_sum = scores[:, None] + logits
                seq_lengths[~is_stopped] += 1
                scores_sum_average = scores_sum / seq_lengths[:, None]
                scores_sum_average, next_tokens = scores_sum_average.view(-1).topk(beam_size, -1)
                next_tokens_source = next_tokens // scores_sum.shape[1]
                seq_lengths = seq_lengths[next_tokens_source]
                next_tokens = next_tokens % scores_sum.shape[1]
                next_tokens = next_tokens.unsqueeze(1)
                tokens = tokens[next_tokens_source]
                tokens = torch.cat((tokens, next_tokens), dim=1)
                generated = generated[next_tokens_source]
                scores = scores_sum_average * seq_lengths
                is_stopped = is_stopped[next_tokens_source]

            next_token_embed = self.transformer.transformer.wte(next_tokens.squeeze()).view(generated.shape[0], 1, -1)
            generated = torch.cat((generated, next_token_embed), dim=1)
            is_stopped = is_stopped + next_tokens.eq(stop_token_index).squeeze()
            if is_stopped.all():
                break

        scores = scores / seq_lengths
        order = scores.argsort(descending=True)
        # tokens tensors are already padded to max_seq_length
        output_texts = [tokens[i] for i in order]
        output_texts = torch.stack(output_texts, dim=0)
        seq_lengths = torch.tensor([seq_lengths[i] for i in order], dtype=seq_lengths.dtype)
        return output_texts, seq_lengths
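# Added usage sketch (not part of the class); all sizes and the GPT-2 eos id below are
# illustrative assumptions:
#
#     decoder = UniDiffuserTextDecoder(prefix_length=77, prefix_inner_dim=768, prefix_hidden_dim=64)
#     prefix = decoder.decode_prefix(decoder.encode_prefix(torch.randn(1, 77, 768)))
#     tokens, lengths = decoder.generate_beam(input_embeds=prefix, device="cpu", eos_token_id=50256)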
| 149 | 1 |
"""Ideal gas law: PV = nRT."""

UNIVERSAL_GAS_CONSTANT = 8.314462  # Unit - J mol-1 K-1


def pressure_of_gas_system(moles: float, kelvin: float, volume: float) -> float:
    """P = nRT / V"""
    if moles < 0 or kelvin < 0 or volume < 0:
        raise ValueError("Invalid inputs. Enter positive value.")
    return moles * kelvin * UNIVERSAL_GAS_CONSTANT / volume


def volume_of_gas_system(moles: float, kelvin: float, pressure: float) -> float:
    """V = nRT / P"""
    if moles < 0 or kelvin < 0 or pressure < 0:
        raise ValueError("Invalid inputs. Enter positive value.")
    return moles * kelvin * UNIVERSAL_GAS_CONSTANT / pressure


if __name__ == "__main__":
    from doctest import testmod

    testmod()
| 187 |
import time
import unittest

from transformers import is_torch_available
from transformers.testing_utils import require_torch, torch_device

from ..test_modeling_common import ids_tensor


if is_torch_available():
    import torch

    from transformers.generation import (
        MaxLengthCriteria,
        MaxNewTokensCriteria,
        MaxTimeCriteria,
        StoppingCriteriaList,
        validate_stopping_criteria,
    )


@require_torch
class StoppingCriteriaTestCase(unittest.TestCase):
    def _get_tensors(self, length):
        batch_size = 3
        vocab_size = 250

        input_ids = ids_tensor((batch_size, length), vocab_size)
        scores = torch.ones((batch_size, length), device=torch_device, dtype=torch.float) / length
        return input_ids, scores

    def test_list_criteria(self):
        input_ids, scores = self._get_tensors(5)

        criteria = StoppingCriteriaList(
            [
                MaxLengthCriteria(max_length=10),
                MaxTimeCriteria(max_time=0.1),
            ]
        )

        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(9)
        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(10)
        self.assertTrue(criteria(input_ids, scores))

    def test_max_length_criteria(self):
        criteria = MaxLengthCriteria(max_length=10)

        input_ids, scores = self._get_tensors(5)
        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(9)
        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(10)
        self.assertTrue(criteria(input_ids, scores))

    def test_max_new_tokens_criteria(self):
        criteria = MaxNewTokensCriteria(start_length=5, max_new_tokens=5)

        input_ids, scores = self._get_tensors(5)
        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(9)
        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(10)
        self.assertTrue(criteria(input_ids, scores))

        criteria_list = StoppingCriteriaList([criteria])
        self.assertEqual(criteria_list.max_length, 10)

    def test_max_time_criteria(self):
        input_ids, scores = self._get_tensors(5)

        criteria = MaxTimeCriteria(max_time=0.1)
        self.assertFalse(criteria(input_ids, scores))

        criteria = MaxTimeCriteria(max_time=0.1, initial_timestamp=time.time() - 0.2)
        self.assertTrue(criteria(input_ids, scores))

    def test_validate_stopping_criteria(self):
        validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10)]), 10)

        with self.assertWarns(UserWarning):
            validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10)]), 11)

        stopping_criteria = validate_stopping_criteria(StoppingCriteriaList(), 11)

        self.assertEqual(len(stopping_criteria), 1)
| 187 | 1 |
"""simple docstring"""
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
ConditionalDetrConfig,
ConditionalDetrForObjectDetection,
ConditionalDetrForSegmentation,
ConditionalDetrImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
# here we list all keys to be renamed (original name on the left, our name on the right)
rename_keys = []
for i in range(6):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(f'transformer.encoder.layers.{i}.self_attn.out_proj.weight', f'encoder.layers.{i}.self_attn.out_proj.weight')
)
rename_keys.append(
(f'transformer.encoder.layers.{i}.self_attn.out_proj.bias', f'encoder.layers.{i}.self_attn.out_proj.bias')
)
rename_keys.append((f'transformer.encoder.layers.{i}.linear1.weight', f'encoder.layers.{i}.fc1.weight'))
rename_keys.append((f'transformer.encoder.layers.{i}.linear1.bias', f'encoder.layers.{i}.fc1.bias'))
rename_keys.append((f'transformer.encoder.layers.{i}.linear2.weight', f'encoder.layers.{i}.fc2.weight'))
rename_keys.append((f'transformer.encoder.layers.{i}.linear2.bias', f'encoder.layers.{i}.fc2.bias'))
rename_keys.append(
(f'transformer.encoder.layers.{i}.norm1.weight', f'encoder.layers.{i}.self_attn_layer_norm.weight')
)
rename_keys.append((f'transformer.encoder.layers.{i}.norm1.bias', f'encoder.layers.{i}.self_attn_layer_norm.bias'))
rename_keys.append((f'transformer.encoder.layers.{i}.norm2.weight', f'encoder.layers.{i}.final_layer_norm.weight'))
rename_keys.append((f'transformer.encoder.layers.{i}.norm2.bias', f'encoder.layers.{i}.final_layer_norm.bias'))
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(f'transformer.decoder.layers.{i}.self_attn.out_proj.weight', f'decoder.layers.{i}.self_attn.out_proj.weight')
)
rename_keys.append(
(f'transformer.decoder.layers.{i}.self_attn.out_proj.bias', f'decoder.layers.{i}.self_attn.out_proj.bias')
)
rename_keys.append(
(
f'transformer.decoder.layers.{i}.cross_attn.out_proj.weight',
f'decoder.layers.{i}.encoder_attn.out_proj.weight',
)
)
rename_keys.append(
(
f'transformer.decoder.layers.{i}.cross_attn.out_proj.bias',
f'decoder.layers.{i}.encoder_attn.out_proj.bias',
)
)
rename_keys.append((f'transformer.decoder.layers.{i}.linear1.weight', f'decoder.layers.{i}.fc1.weight'))
rename_keys.append((f'transformer.decoder.layers.{i}.linear1.bias', f'decoder.layers.{i}.fc1.bias'))
rename_keys.append((f'transformer.decoder.layers.{i}.linear2.weight', f'decoder.layers.{i}.fc2.weight'))
rename_keys.append((f'transformer.decoder.layers.{i}.linear2.bias', f'decoder.layers.{i}.fc2.bias'))
rename_keys.append(
(f'transformer.decoder.layers.{i}.norm1.weight', f'decoder.layers.{i}.self_attn_layer_norm.weight')
)
rename_keys.append((f'transformer.decoder.layers.{i}.norm1.bias', f'decoder.layers.{i}.self_attn_layer_norm.bias'))
rename_keys.append(
(f'transformer.decoder.layers.{i}.norm2.weight', f'decoder.layers.{i}.encoder_attn_layer_norm.weight')
)
rename_keys.append(
(f'transformer.decoder.layers.{i}.norm2.bias', f'decoder.layers.{i}.encoder_attn_layer_norm.bias')
)
rename_keys.append((f'transformer.decoder.layers.{i}.norm3.weight', f'decoder.layers.{i}.final_layer_norm.weight'))
rename_keys.append((f'transformer.decoder.layers.{i}.norm3.bias', f'decoder.layers.{i}.final_layer_norm.bias'))
# q, k, v projections in self/cross-attention in decoder for conditional DETR
rename_keys.append(
(f'transformer.decoder.layers.{i}.sa_qcontent_proj.weight', f'decoder.layers.{i}.sa_qcontent_proj.weight')
)
rename_keys.append(
(f'transformer.decoder.layers.{i}.sa_kcontent_proj.weight', f'decoder.layers.{i}.sa_kcontent_proj.weight')
)
rename_keys.append(
(f'transformer.decoder.layers.{i}.sa_qpos_proj.weight', f'decoder.layers.{i}.sa_qpos_proj.weight')
)
rename_keys.append(
(f'transformer.decoder.layers.{i}.sa_kpos_proj.weight', f'decoder.layers.{i}.sa_kpos_proj.weight')
)
rename_keys.append((f'transformer.decoder.layers.{i}.sa_v_proj.weight', f'decoder.layers.{i}.sa_v_proj.weight'))
rename_keys.append(
(f'transformer.decoder.layers.{i}.ca_qcontent_proj.weight', f'decoder.layers.{i}.ca_qcontent_proj.weight')
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.weight", f"decoder.layers.{i}.ca_qpos_proj.weight"))
rename_keys.append(
(f'transformer.decoder.layers.{i}.ca_kcontent_proj.weight', f'decoder.layers.{i}.ca_kcontent_proj.weight')
)
rename_keys.append(
(f'transformer.decoder.layers.{i}.ca_kpos_proj.weight', f'decoder.layers.{i}.ca_kpos_proj.weight')
)
rename_keys.append((f'transformer.decoder.layers.{i}.ca_v_proj.weight', f'decoder.layers.{i}.ca_v_proj.weight'))
rename_keys.append(
(f'transformer.decoder.layers.{i}.ca_qpos_sine_proj.weight', f'decoder.layers.{i}.ca_qpos_sine_proj.weight')
)
rename_keys.append(
(f'transformer.decoder.layers.{i}.sa_qcontent_proj.bias', f'decoder.layers.{i}.sa_qcontent_proj.bias')
)
rename_keys.append(
(f'transformer.decoder.layers.{i}.sa_kcontent_proj.bias', f'decoder.layers.{i}.sa_kcontent_proj.bias')
)
rename_keys.append((f'transformer.decoder.layers.{i}.sa_qpos_proj.bias', f'decoder.layers.{i}.sa_qpos_proj.bias'))
rename_keys.append((f'transformer.decoder.layers.{i}.sa_kpos_proj.bias', f'decoder.layers.{i}.sa_kpos_proj.bias'))
rename_keys.append((f'transformer.decoder.layers.{i}.sa_v_proj.bias', f'decoder.layers.{i}.sa_v_proj.bias'))
rename_keys.append(
(f'transformer.decoder.layers.{i}.ca_qcontent_proj.bias', f'decoder.layers.{i}.ca_qcontent_proj.bias')
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.bias", f"decoder.layers.{i}.ca_qpos_proj.bias"))
rename_keys.append(
(f'transformer.decoder.layers.{i}.ca_kcontent_proj.bias', f'decoder.layers.{i}.ca_kcontent_proj.bias')
)
rename_keys.append((f'transformer.decoder.layers.{i}.ca_kpos_proj.bias', f'decoder.layers.{i}.ca_kpos_proj.bias'))
rename_keys.append((f'transformer.decoder.layers.{i}.ca_v_proj.bias', f'decoder.layers.{i}.ca_v_proj.bias'))
rename_keys.append(
(f'transformer.decoder.layers.{i}.ca_qpos_sine_proj.bias', f'decoder.layers.{i}.ca_qpos_sine_proj.bias')
)
# convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads
# for conditional DETR, also convert reference point head and query scale MLP
rename_keys.extend(
[
('''input_proj.weight''', '''input_projection.weight'''),
('''input_proj.bias''', '''input_projection.bias'''),
('''query_embed.weight''', '''query_position_embeddings.weight'''),
('''transformer.decoder.norm.weight''', '''decoder.layernorm.weight'''),
('''transformer.decoder.norm.bias''', '''decoder.layernorm.bias'''),
('''class_embed.weight''', '''class_labels_classifier.weight'''),
('''class_embed.bias''', '''class_labels_classifier.bias'''),
('''bbox_embed.layers.0.weight''', '''bbox_predictor.layers.0.weight'''),
('''bbox_embed.layers.0.bias''', '''bbox_predictor.layers.0.bias'''),
('''bbox_embed.layers.1.weight''', '''bbox_predictor.layers.1.weight'''),
('''bbox_embed.layers.1.bias''', '''bbox_predictor.layers.1.bias'''),
('''bbox_embed.layers.2.weight''', '''bbox_predictor.layers.2.weight'''),
('''bbox_embed.layers.2.bias''', '''bbox_predictor.layers.2.bias'''),
('''transformer.decoder.ref_point_head.layers.0.weight''', '''decoder.ref_point_head.layers.0.weight'''),
('''transformer.decoder.ref_point_head.layers.0.bias''', '''decoder.ref_point_head.layers.0.bias'''),
('''transformer.decoder.ref_point_head.layers.1.weight''', '''decoder.ref_point_head.layers.1.weight'''),
('''transformer.decoder.ref_point_head.layers.1.bias''', '''decoder.ref_point_head.layers.1.bias'''),
('''transformer.decoder.query_scale.layers.0.weight''', '''decoder.query_scale.layers.0.weight'''),
('''transformer.decoder.query_scale.layers.0.bias''', '''decoder.query_scale.layers.0.bias'''),
('''transformer.decoder.query_scale.layers.1.weight''', '''decoder.query_scale.layers.1.weight'''),
('''transformer.decoder.query_scale.layers.1.bias''', '''decoder.query_scale.layers.1.bias'''),
('''transformer.decoder.layers.0.ca_qpos_proj.weight''', '''decoder.layers.0.ca_qpos_proj.weight'''),
('''transformer.decoder.layers.0.ca_qpos_proj.bias''', '''decoder.layers.0.ca_qpos_proj.bias'''),
]
)
def rename_key(state_dict, old, new):
    val = state_dict.pop(old)
    state_dict[new] = val
def rename_backbone_keys(state_dict):
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if "backbone.0.body" in key:
            new_key = key.replace("backbone.0.body", "backbone.conv_encoder.model")
            new_state_dict[new_key] = value
        else:
            new_state_dict[key] = value
    return new_state_dict
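# Example (for illustration): rename_backbone_keys maps a checkpoint key such as
#   "backbone.0.body.layer1.0.conv1.weight"
# onto the HuggingFace naming scheme
#   "backbone.conv_encoder.model.layer1.0.conv1.weight"
# while leaving all other keys untouched.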
def read_in_q_k_v(state_dict, is_panoptic=False):
    prefix = ""
    if is_panoptic:
        prefix = "conditional_detr."
    # first: transformer encoder
    for i in range(6):
        # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
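# Note on the slicing above: PyTorch's nn.MultiheadAttention fuses the query,
# key and value projections into a single in_proj_weight of shape
# (3 * hidden_size, hidden_size). With hidden_size = 256, rows 0-255 hold the
# query projection, rows 256-511 the key projection, and the last 256 rows the
# value projection, which is exactly how the three slices are split out.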
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
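# The image fetched above (COCO val2017 image 000000039769, the familiar picture
# of two cats on a couch) is the standard smoke-test input used across the
# transformers conversion scripts.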
@torch.no_grad()
def convert_conditional_detr_checkpoint(model_name, pytorch_dump_folder_path):
    config = ConditionalDetrConfig()
    # set backbone and dilation attributes
    if "resnet101" in model_name:
        config.backbone = "resnet101"
    if "dc5" in model_name:
        config.dilation = True
    is_panoptic = "panoptic" in model_name
    if is_panoptic:
        config.num_labels = 250
    else:
        config.num_labels = 91
        repo_id = "huggingface/label-files"
        filename = "coco-detection-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
    # load image processor
    format = "coco_panoptic" if is_panoptic else "coco_detection"
    image_processor = ConditionalDetrImageProcessor(format=format)
    # prepare image
    img = prepare_img()
    encoding = image_processor(images=img, return_tensors="pt")
    pixel_values = encoding["pixel_values"]
    logger.info(f"Converting model {model_name}...")
    # load original model from torch hub
    conditional_detr = torch.hub.load("DeppMeng/ConditionalDETR", model_name, pretrained=True).eval()
    state_dict = conditional_detr.state_dict()
    # rename keys
    for src, dest in rename_keys:
        if is_panoptic:
            src = "conditional_detr." + src
        rename_key(state_dict, src, dest)
    state_dict = rename_backbone_keys(state_dict)
    # query, key and value matrices need special treatment
    read_in_q_k_v(state_dict, is_panoptic=is_panoptic)
    # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
    prefix = "conditional_detr.model." if is_panoptic else "model."
    for key in state_dict.copy().keys():
        if is_panoptic:
            if (
                key.startswith("conditional_detr")
                and not key.startswith("class_labels_classifier")
                and not key.startswith("bbox_predictor")
            ):
                val = state_dict.pop(key)
                state_dict["conditional_detr.model" + key[len("conditional_detr") :]] = val
            elif "class_labels_classifier" in key or "bbox_predictor" in key:
                val = state_dict.pop(key)
                state_dict["conditional_detr." + key] = val
            elif key.startswith("bbox_attention") or key.startswith("mask_head"):
                continue
            else:
                val = state_dict.pop(key)
                state_dict[prefix + key] = val
        else:
            if not key.startswith("class_labels_classifier") and not key.startswith("bbox_predictor"):
                val = state_dict.pop(key)
                state_dict[prefix + key] = val
    # finally, create HuggingFace model and load state dict
    model = ConditionalDetrForSegmentation(config) if is_panoptic else ConditionalDetrForObjectDetection(config)
    model.load_state_dict(state_dict)
    model.eval()
    model.push_to_hub(repo_id=model_name, organization="DepuMeng", commit_message="Add model")
    # verify our conversion
    original_outputs = conditional_detr(pixel_values)
    outputs = model(pixel_values)
    assert torch.allclose(outputs.logits, original_outputs["pred_logits"], atol=1e-4)
    assert torch.allclose(outputs.pred_boxes, original_outputs["pred_boxes"], atol=1e-4)
    if is_panoptic:
        assert torch.allclose(outputs.pred_masks, original_outputs["pred_masks"], atol=1e-4)
    # Save model and image processor
    logger.info(f"Saving PyTorch model and image processor to {pytorch_dump_folder_path}...")
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
    image_processor.save_pretrained(pytorch_dump_folder_path)
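# Example invocation (script name and output path are placeholders):
#   python convert_conditional_detr_original_pytorch_checkpoint_to_pytorch.py \
#       --model_name conditional_detr_resnet50 \
#       --pytorch_dump_folder_path ./conditional_detr_resnet50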
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'''--model_name''',
default='''conditional_detr_resnet50''',
type=str,
help='''Name of the CONDITIONAL_DETR model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the folder to output PyTorch model.'''
)
    args = parser.parse_args()
convert_conditional_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path)
| 268 |
'''simple docstring'''
def base16_encode(data):
    return "".join([hex(byte)[2:].zfill(2).upper() for byte in list(data)])
def base16_decode(data):
    # Check data validity, following RFC3548
    # https://www.ietf.org/rfc/rfc3548.txt
    if (len(data) % 2) != 0:
        raise ValueError(
            "Base16 encoded data is invalid:\nData does not have an even number of hex digits." )
    # Check the character set - the standard base16 alphabet
    # is uppercase according to RFC3548 section 6
    if not set(data) <= set("0123456789ABCDEF"):
        raise ValueError(
            "Base16 encoded data is invalid:\nData is not uppercase hex or it contains invalid characters." )
    # For every two hexadecimal digits (= a byte), turn it into an integer.
    # Then, string the result together into bytes, and return it.
    return bytes(int(data[i] + data[i + 1], 16) for i in range(0, len(data), 2))
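# Round-trip sketch:
#   base16_decode("48656C6C6F20576F726C6421") -> b"Hello World!"
#   base16_decode(base16_encode(b"any bytes")) == b"any bytes"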
if __name__ == "__main__":
import doctest
doctest.testmod()
| 298 | 0 |
from __future__ import annotations
import unittest
import numpy as np
from transformers import BlipTextConfig
from transformers.testing_utils import require_tf, slow
from transformers.utils import is_tf_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
if is_tf_available():
import tensorflow as tf
from transformers import TFBlipTextModel
from transformers.models.blip.modeling_tf_blip import TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST
class BlipTextModelTester:
    def __init__( self, parent, batch_size=12, seq_length=7, is_training=True, use_input_mask=True, use_labels=True, vocab_size=99, hidden_size=32, projection_dim=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, dropout=0.1, attention_dropout=0.1, max_position_embeddings=512, initializer_range=0.02, bos_token_id=0, scope=None, ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.projection_dim = projection_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.scope = scope
        self.bos_token_id = bos_token_id
    def prepare_config_and_inputs( self ):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        if input_mask is not None:
            input_mask = input_mask.numpy()
            batch_size, seq_length = input_mask.shape
            rnd_start_indices = np.random.randint(1, seq_length - 1, size=(batch_size,))
            for batch_idx, start_index in enumerate(rnd_start_indices):
                input_mask[batch_idx, :start_index] = 1
                input_mask[batch_idx, start_index:] = 0
        config = self.get_config()
        return config, input_ids, tf.convert_to_tensor(input_mask)
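    # The loop above builds a "prefix" attention mask: for every batch row a
    # random split point is drawn, positions before it are marked 1 (attended)
    # and positions from it onward are marked 0 (masked), so each example gets
    # a different effective sequence length.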
    def get_config( self ):
return BlipTextConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , projection_dim=self.projection_dim , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , dropout=self.dropout , attention_dropout=self.attention_dropout , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , bos_token_id=self.bos_token_id , )
    def create_and_check_model( self, config, input_ids, input_mask ):
        model = TFBlipTextModel(config=config)
        result = model(input_ids, attention_mask=input_mask, training=False)
        result = model(input_ids, training=False)
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
        self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
    def prepare_config_and_inputs_for_common( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, input_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class BlipTextModelTest( TFModelTesterMixin, unittest.TestCase ):
    all_model_classes = (TFBlipTextModel,) if is_tf_available() else ()
_UpperCAmelCase :List[str] = False
_UpperCAmelCase :List[str] = False
_UpperCAmelCase :Union[str, Any] = False
    def setUp( self ):
        self.model_tester = BlipTextModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BlipTextConfig, hidden_size=37)
    def test_config( self ):
        self.config_tester.run_common_tests()
    def test_model( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_training( self ):
        pass
    def test_training_gradient_checkpointing( self ):
        pass
    @unittest.skip(reason="Blip does not use inputs_embeds" )
    def test_inputs_embeds( self ):
        pass
    @unittest.skip(reason="BlipTextModel has no base class and is not available in MODEL_MAPPING" )
    def test_save_load_fast_init_from_base( self ):
        pass
    @unittest.skip(reason="BlipTextModel has no base class and is not available in MODEL_MAPPING" )
    def test_save_load_fast_init_to_base( self ):
        pass
    @slow
    def test_model_from_pretrained( self ):
        for model_name in TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFBlipTextModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
    def test_pt_tf_model_equivalence( self, allow_missing_keys=True ):
        super().test_pt_tf_model_equivalence(allow_missing_keys=allow_missing_keys)
| 360 |
"""simple docstring"""
import inspect
import unittest
from transformers import DecisionTransformerConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import DecisionTransformerModel
from transformers.models.decision_transformer.modeling_decision_transformer import (
DECISION_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
class DecisionTransformerModelTester:
    def __init__( self, parent, batch_size=13, seq_length=7, act_dim=6, state_dim=17, hidden_size=23, max_length=11, is_training=True, ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.act_dim = act_dim
        self.state_dim = state_dim
        self.hidden_size = hidden_size
        self.max_length = max_length
        self.is_training = is_training
    def prepare_config_and_inputs( self ):
        states = floats_tensor((self.batch_size, self.seq_length, self.state_dim) )
        actions = floats_tensor((self.batch_size, self.seq_length, self.act_dim) )
        rewards = floats_tensor((self.batch_size, self.seq_length, 1) )
        returns_to_go = floats_tensor((self.batch_size, self.seq_length, 1) )
        timesteps = ids_tensor((self.batch_size, self.seq_length) , vocab_size=1000 )
        attention_mask = random_attention_mask((self.batch_size, self.seq_length) )
        config = self.get_config()
        return (
            config,
            states,
            actions,
            rewards,
            returns_to_go,
            timesteps,
            attention_mask,
        )
    def get_config( self ):
return DecisionTransformerConfig(
batch_size=self.batch_size , seq_length=self.seq_length , act_dim=self.act_dim , state_dim=self.state_dim , hidden_size=self.hidden_size , max_length=self.max_length , )
    def create_and_check_model( self, config, states, actions, rewards, returns_to_go, timesteps, attention_mask, ):
        model = DecisionTransformerModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(states, actions, rewards, returns_to_go, timesteps, attention_mask)
        self.parent.assertEqual(result.state_preds.shape , states.shape )
        self.parent.assertEqual(result.action_preds.shape , actions.shape )
        self.parent.assertEqual(result.return_preds.shape , returns_to_go.shape )
        self.parent.assertEqual(
            result.last_hidden_state.shape , (self.batch_size, self.seq_length * 3, self.hidden_size) )  # seq length * 3 as there are 3 modalities: states, returns and actions
    def prepare_config_and_inputs_for_common( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            states,
            actions,
            rewards,
            returns_to_go,
            timesteps,
            attention_mask,
        ) = config_and_inputs
        inputs_dict = {
            "states": states,
            "actions": actions,
            "rewards": rewards,
            "returns_to_go": returns_to_go,
            "timesteps": timesteps,
            "attention_mask": attention_mask,
        }
        return config, inputs_dict
@require_torch
class DecisionTransformerModelTest( ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase ):
    all_model_classes = (DecisionTransformerModel,) if is_torch_available() else ()
    all_generative_model_classes = ()
    pipeline_model_mapping = {"feature-extraction": DecisionTransformerModel} if is_torch_available() else {}
    # Ignoring of a failing test from GenerationTesterMixin, as the model does not use inputs_ids
    test_generate_without_input_ids = False
    # Ignoring of a failing tests from ModelTesterMixin, as the model does not implement these features
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_attention_outputs = False
    test_hidden_states_output = False
    test_inputs_embeds = False
    test_model_common_attributes = False
    test_gradient_checkpointing = False
    test_torchscript = False
    def setUp( self ):
        self.model_tester = DecisionTransformerModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DecisionTransformerConfig, hidden_size=37)
    def test_config( self ):
        self.config_tester.run_common_tests()
    def test_model( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    @slow
    def test_model_from_pretrained( self ):
        for model_name in DECISION_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = DecisionTransformerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
    def test_forward_signature( self ):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = [
                "states",
                "actions",
                "rewards",
                "returns_to_go",
                "timesteps",
                "attention_mask",
            ]
            self.assertListEqual(arg_names[: len(expected_arg_names)], expected_arg_names)
@require_torch
class DecisionTransformerModelIntegrationTest( unittest.TestCase ):
    @slow
    def test_autoregressive_prediction( self ):
        NUM_STEPS = 2  # number of steps of autoregressive prediction we will perform
        TARGET_RETURN = 10  # defined by the RL environment, may be normalized
        model = DecisionTransformerModel.from_pretrained("edbeeching/decision-transformer-gym-hopper-expert")
        model = model.to(torch_device)
        config = model.config
        torch.manual_seed(0)
        state = torch.randn(1 , 1 , config.state_dim).to(device=torch_device , dtype=torch.float32)  # env.reset()
        expected_outputs = torch.tensor(
            [[0.242_793, -0.28_693_074, 0.8_742_613], [0.67_815_274, -0.08_101_085, -0.12_952_147]] , device=torch_device)
        returns_to_go = torch.tensor(TARGET_RETURN , device=torch_device , dtype=torch.float32).reshape(1 , 1 , 1)
        states = state
        actions = torch.zeros(1 , 0 , config.act_dim , device=torch_device , dtype=torch.float32)
        rewards = torch.zeros(1 , 0 , device=torch_device , dtype=torch.float32)
        timesteps = torch.tensor(0 , device=torch_device , dtype=torch.long).reshape(1 , 1)
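        # Autoregressive rollout: at each step we append a zero placeholder for
        # the next action and reward, run the model on the full history, compare
        # the predicted action against the expected tensor, then feed a (random)
        # next state and the decremented return-to-go back in for the next step.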
        for step in range(NUM_STEPS):
            actions = torch.cat([actions, torch.zeros(1 , 1 , config.act_dim , device=torch_device)] , dim=1)
            rewards = torch.cat([rewards, torch.zeros(1 , 1 , device=torch_device)] , dim=1)
            attention_mask = torch.ones(1 , states.shape[1]).to(dtype=torch.long , device=states.device)
            with torch.no_grad():
                state_pred, action_pred, return_pred = model(
                    states=states , actions=actions , rewards=rewards , returns_to_go=returns_to_go , timesteps=timesteps , attention_mask=attention_mask , return_dict=False , )
            self.assertEqual(action_pred.shape , actions.shape)
            self.assertTrue(torch.allclose(action_pred[0, -1] , expected_outputs[step] , atol=1E-4))
            state, reward, done, _ = (  # env.step(action)
                torch.randn(1 , 1 , config.state_dim).to(device=torch_device , dtype=torch.float32),
                1.0,
                False,
                {},
            )
            actions[-1] = action_pred[0, -1]
            states = torch.cat([states, state] , dim=1)
            pred_return = returns_to_go[0, -1] - reward
            returns_to_go = torch.cat([returns_to_go, pred_return.reshape(1 , 1 , 1)] , dim=1)
            timesteps = torch.cat(
                [timesteps, torch.ones((1, 1) , device=torch_device , dtype=torch.long) * (step + 1)] , dim=1)
| 209 | 0 |
"""simple docstring"""
import argparse
import dataclasses
import json
import logging
import os
import shutil
from typing import List, Optional
import datasets
from accelerate import Accelerator
from datasets import load_dataset
from finetuning import finetune
from tqdm.auto import tqdm
import transformers
from transformers import AutoConfig, set_seed
from transformers.trainer_utils import IntervalStrategy
logger = logging.getLogger(__name__)
MODEL_BIN_FILE = """pytorch_model.bin"""
@dataclasses.dataclass
class STModelArguments:
    """simple docstring"""
    model_name_or_path: str = dataclasses.field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models."} )
    cache_dir: Optional[str] = dataclasses.field(
        default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co."}, )
@dataclasses.dataclass
class STDataArguments:
    """simple docstring"""
    train_file: str = dataclasses.field(metadata={"help": "A csv or a json file containing the training data."} )
    infer_file: str = dataclasses.field(metadata={"help": "A csv or a json file containing the data to predict on."} )
    eval_file: Optional[str] = dataclasses.field(
        default=None, metadata={"help": "A csv or a json file containing the validation data."} )
    task_name: Optional[str] = dataclasses.field(
        default=None, metadata={"help": "The name of the task to train on."}, )
    label_list: Optional[List[str]] = dataclasses.field(
        default=None, metadata={"help": "The list of labels for the task."} )
@dataclasses.dataclass
class STTrainingArguments:
    """simple docstring"""
    output_dir: str = dataclasses.field(
        metadata={"help": "The output directory where the model predictions and checkpoints will be written."} )
    eval_metric: Optional[str] = dataclasses.field(
        default="accuracy", metadata={"help": "The evaluation metric used for the task."} )
    evaluation_strategy: Optional[str] = dataclasses.field(
        default="no", metadata={
            "help": "The evaluation strategy to adopt during training. Possible values are: [\"no\", \"step\", \"epoch\"]"
        }, )
    early_stopping_patience: Optional[int] = dataclasses.field(
        default=10, metadata={"help": "Number of evaluation calls with no improvement after which training will be stopped."}, )
    early_stopping_threshold: Optional[float] = dataclasses.field(
        default=0.0, metadata={
            "help": "How much the specified evaluation metric must improve to satisfy early stopping conditions."
        }, )
    do_filter_by_confidence: Optional[bool] = dataclasses.field(
        default=False, metadata={"help": "Whether to filter the pseudo-labeled data based on the confidence score."}, )
    do_filter_by_val_performance: Optional[bool] = dataclasses.field(
        default=False, metadata={"help": "Whether to filter the pseudo-labeled data based on the validation performance."}, )
    finetune_on_labeled_data: Optional[bool] = dataclasses.field(
        default=False, metadata={"help": "Whether to fine-tune on labeled data after pseudo training."}, )
    confidence_threshold: Optional[float] = dataclasses.field(
        default=0.0, metadata={"help": "Confidence threshold for pseudo-labeled data filtering."}, )
    max_selftrain_iterations: Optional[int] = dataclasses.field(
        default=100, metadata={"help": "Number of evaluation calls with no improvement after which training will be stopped."}, )
    seed: Optional[int] = dataclasses.field(
        default=None, metadata={"help": "Random seed for initialization."}, )
def create_pseudo_labeled_data(args, infer_input, infer_output, eval_result, id2label, next_data_dir):
    """simple docstring"""
    dataset = datasets.concatenate_datasets([infer_input, infer_output] , axis=1 )
    if args.do_filter_by_confidence:
        dataset = dataset.filter(lambda example: example["probability"] > args.confidence_threshold )
    if args.do_filter_by_val_performance:
        assert eval_result >= 0.0 and eval_result <= 1.0
        num_selected_rows = int(eval_result * len(dataset ) )
        print(num_selected_rows )
        dataset = dataset.sort('''probability''' , reverse=True )
        dataset = dataset.select(range(num_selected_rows ) )
    dataset = dataset.remove_columns(['''label''', '''probability'''] )
    dataset = dataset.rename_column('''prediction''' , '''label''' )
    dataset = dataset.map(lambda example: {"label": id2label[example["label"]]} )
    dataset = dataset.shuffle(seed=args.seed )
    pseudo_labeled_data_file = os.path.join(next_data_dir , f'''train_pseudo.{args.data_file_extension}''' )
    if args.data_file_extension == "csv":
        dataset.to_csv(pseudo_labeled_data_file , index=False )
    else:
        dataset.to_json(pseudo_labeled_data_file )
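# Filtering sketch (hypothetical numbers, for illustration): with
# confidence_threshold = 0.8 and eval_result = 0.9 on a 1000-row inference set,
# the confidence filter keeps rows whose "probability" exceeds 0.8, and the
# validation-performance filter then keeps the top int(0.9 * len(dataset))
# remaining rows when sorted by probability.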
def selftrain(model_name_or_path, train_file, infer_file, output_dir, **kwargs):
    """simple docstring"""
    accelerator = Accelerator()
    # Make one log on every process with the configuration for debugging.
    logging.basicConfig(
        format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , level=logging.INFO , )
    logger.info(accelerator.state )
    # Setup logging, we only want one process per machine to log things on the
    # screen. accelerator.is_local_main_process is only True for one process per
    # machine.
    logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR )
    if accelerator.is_local_main_process:
        datasets.utils.logging.set_verbosity_warning()
        transformers.utils.logging.set_verbosity_info()
    else:
        datasets.utils.logging.set_verbosity_error()
        transformers.utils.logging.set_verbosity_error()
    model_args = STModelArguments(model_name_or_path=model_name_or_path )
    data_args = STDataArguments(train_file=train_file , infer_file=infer_file )
    training_args = STTrainingArguments(output_dir=output_dir )
    args = argparse.Namespace()
    for arg_class in (model_args, data_args, training_args):
        for key, value in vars(arg_class ).items():
            setattr(args , key , value )
    for key, value in kwargs.items():
        if hasattr(args , key ):
            setattr(args , key , value )
    # Sanity checks
    data_files = {}
    args.data_file_extension = None
    # You need to provide the training data and the data to predict on
    assert args.train_file is not None
    assert args.infer_file is not None
    data_files["train"] = args.train_file
    data_files["infer"] = args.infer_file
    if args.evaluation_strategy != IntervalStrategy.NO.value:
        assert args.eval_file is not None
        data_files["eval"] = args.eval_file
    for key in data_files:
        extension = data_files[key].split('''.''' )[-1]
        assert extension in ["csv", "json"], f'''`{key}_file` should be a csv or a json file.'''
        if args.data_file_extension is None:
            args.data_file_extension = extension
        else:
            assert extension == args.data_file_extension, f'''`{key}_file` should be a {args.data_file_extension} file`.'''
    assert (
        args.eval_metric in datasets.list_metrics()
    ), f'''{args.eval_metric} not in the list of supported metrics {datasets.list_metrics()}.'''
    # If passed along, set the training seed now.
    if args.seed is not None:
        set_seed(args.seed )
    logger.info('''Creating the initial data directory for self-training...''' )
    data_dir_format = f'''{args.output_dir}/self-train_iter-{{}}'''.format
    initial_data_dir = data_dir_format(0 )
    if accelerator.is_main_process:
        if args.output_dir is not None:
            os.makedirs(args.output_dir , exist_ok=True )
            os.makedirs(initial_data_dir , exist_ok=True )
    accelerator.wait_for_everyone()
    best_iteration = None
    best_eval_result = None
    early_stopping_patience_counter = 0
    should_training_stop = False
    # Show the progress bar
    progress_bar = tqdm(range(args.max_selftrain_iterations ) , disable=not accelerator.is_local_main_process )
    # Self-train
    for iteration in range(0 , int(args.max_selftrain_iterations ) ):
        current_data_dir = data_dir_format(iteration )
        assert os.path.exists(current_data_dir )
        # Stage 1: initial fine-tuning for iteration = 0 or pseudo-training for
        # iteration > 0
        current_output_dir = os.path.join(current_data_dir , '''stage-1''' )
        arguments_dict = {
            '''accelerator''': accelerator,
            '''model_name_or_path''': args.model_name_or_path,
            '''cache_dir''': args.cache_dir,
            '''do_train''': True,
            '''train_file''': data_files['''train'''] if iteration == 0 else data_files['''train_pseudo'''],
            '''do_eval''': True if args.eval_file is not None else False,
            '''eval_file''': data_files['''eval'''],
            '''do_predict''': True,
            '''infer_file''': data_files['''infer'''],
            '''task_name''': args.task_name,
            '''label_list''': args.label_list,
            '''output_dir''': current_output_dir,
            '''eval_metric''': args.eval_metric,
            '''evaluation_strategy''': args.evaluation_strategy,
            '''early_stopping_patience''': args.early_stopping_patience,
            '''early_stopping_threshold''': args.early_stopping_threshold,
            '''seed''': args.seed,
        }
        # Add additional training arguments
        for key, value in kwargs.items():
            if key not in arguments_dict and not hasattr(training_args , key ):
                arguments_dict.update({key: value} )
        model_bin_file_path = os.path.join(current_output_dir , '''best-checkpoint''' , MODEL_BIN_FILE )
        if os.path.exists(model_bin_file_path ):
            logger.info(
                '''Found existing model checkpoint at %s. Skipping self-training: iteration: %d, stage: 1.''' , model_bin_file_path , iteration , )
        else:
            logger.info('''***** Running self-training: iteration: %d, stage: 1 *****''' , iteration )
            finetune(**arguments_dict )
            accelerator.wait_for_everyone()
            assert os.path.exists(model_bin_file_path )
            logger.info('''Self-training job completed: iteration: %d, stage: 1.''' , iteration )
        if iteration > 0 and args.finetune_on_labeled_data:
            # Stage 2 (optional): fine-tuning on the original labeled data
            model_path = os.path.join(current_output_dir , '''best-checkpoint''' )
            current_output_dir = os.path.join(current_data_dir , '''stage-2''' )
            # Update arguments_dict
            arguments_dict['''model_name_or_path'''] = model_path
            arguments_dict['''train_file'''] = data_files['''train''']
            arguments_dict['''output_dir'''] = current_output_dir
            model_bin_file_path = os.path.join(current_output_dir , '''best-checkpoint''' , MODEL_BIN_FILE )
            if os.path.exists(model_bin_file_path ):
                logger.info(
                    '''Found existing model checkpoint at %s. Skipping self-training: iteration: %d, stage: 2.''' , model_bin_file_path , iteration , )
            else:
                logger.info('''***** Running self-training: iteration: %d, stage: 2 *****''' , iteration )
                finetune(**arguments_dict )
                accelerator.wait_for_everyone()
                assert os.path.exists(model_bin_file_path )
                logger.info('''Self-training job completed: iteration: %d, stage: 2.''' , iteration )
        new_iteration = iteration
        next_data_dir = data_dir_format(iteration + 1 )
        config = AutoConfig.from_pretrained(os.path.join(current_output_dir , '''best-checkpoint''' ) )
        id2label = config.id2label
        eval_results_file = os.path.join(current_output_dir , '''eval_results_best-checkpoint.json''' )
        test_results_file = os.path.join(current_output_dir , '''test_results_best-checkpoint.json''' )
        assert os.path.exists(eval_results_file )
        with open(eval_results_file , '''r''' ) as f:
            eval_result = float(json.load(f )[args.eval_metric] )
        infer_output_file = os.path.join(current_output_dir , '''infer_output_best-checkpoint.csv''' )
        assert os.path.exists(infer_output_file )
        # Loading the dataset from local csv or json files.
        infer_input = load_dataset(args.data_file_extension , data_files={'''data''': data_files['''infer''']} )['''data''']
        infer_output = load_dataset('''csv''' , data_files={'''data''': infer_output_file} )['''data''']
        if accelerator.is_main_process:
            os.makedirs(next_data_dir , exist_ok=True )
            shutil.copy(eval_results_file , os.path.join(output_dir , f'''eval_results_iter-{iteration}.json''' ) )
            if os.path.exists(test_results_file ):
                shutil.copy(test_results_file , os.path.join(output_dir , f'''test_results_iter-{iteration}.json''' ) )
            create_pseudo_labeled_data(args , infer_input , infer_output , eval_result , id2label , next_data_dir )
        accelerator.wait_for_everyone()
        data_files['''train_pseudo'''] = os.path.join(next_data_dir , f'''train_pseudo.{args.data_file_extension}''' )
        if args.evaluation_strategy != IntervalStrategy.NO.value:
            new_eval_result = eval_result
            if best_iteration is None:
                best_iteration = new_iteration
                best_eval_result = new_eval_result
            else:
                if new_eval_result - best_eval_result > args.early_stopping_threshold:
                    best_iteration = new_iteration
                    best_eval_result = new_eval_result
                    early_stopping_patience_counter = 0
                else:
                    if new_eval_result == best_eval_result:
                        best_iteration = new_iteration
                        best_eval_result = new_eval_result
                    early_stopping_patience_counter += 1
                if early_stopping_patience_counter >= args.early_stopping_patience:
                    should_training_stop = True
        progress_bar.update(1 )
        if should_training_stop:
            break
    if best_iteration is not None:
        # Save the best iteration
        logger.info('''Best iteration: %d''' , best_iteration )
        logger.info('''Best evaluation result: %s = %f''' , args.eval_metric , best_eval_result )
        accelerator.wait_for_everyone()
        if accelerator.is_main_process:
            shutil.copy(
                os.path.join(output_dir , f'''eval_results_iter-{iteration}.json''' ) , os.path.join(output_dir , '''eval_results_best-iteration.json''' ) , )
    else:
        # Assume that the last iteration is the best
        logger.info('''Best iteration: %d''' , args.max_selftrain_iterations - 1 )
        logger.info('''Best evaluation result: %s = %f''' , args.eval_metric , eval_result )
        accelerator.wait_for_everyone()
        if accelerator.is_main_process:
            shutil.copy(
                os.path.join(output_dir , f'''eval_results_iter-{args.max_selftrain_iterations - 1}.json''' ) , os.path.join(output_dir , '''eval_results_best-iteration.json''' ) , )
| 81 |
"""simple docstring"""
def add(first, second):
    """simple docstring"""
    while second != 0:
        # carry holds the common set bits; XOR adds the bits without carrying
        carry = first & second
        first ^= second
        second = carry << 1
    return first
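# Worked example for add(5, 3):
#   iteration 1: carry = 5 & 3 = 1, first = 5 ^ 3 = 6, second = 1 << 1 = 2
#   iteration 2: carry = 6 & 2 = 2, first = 6 ^ 2 = 4, second = 2 << 1 = 4
#   iteration 3: carry = 4 & 4 = 4, first = 4 ^ 4 = 0, second = 4 << 1 = 8
#   iteration 4: carry = 0 & 8 = 0, first = 0 ^ 8 = 8, second = 0 -> returns 8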
if __name__ == "__main__":
import doctest
doctest.testmod()
lowerCamelCase_ : Dict = int(input("""Enter the first number: """).strip())
lowerCamelCase_ : List[Any] = int(input("""Enter the second number: """).strip())
print(F'{add(first, second) = }') | 81 | 1 |
"""simple docstring"""
from typing import Optional
import torch
import torch.utils.checkpoint
from torch import Tensor, nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACT2FN
from ...modeling_outputs import (
BackboneOutput,
BaseModelOutputWithNoAttention,
BaseModelOutputWithPoolingAndNoAttention,
ImageClassifierOutputWithNoAttention,
)
from ...modeling_utils import PreTrainedModel
from ...utils import (
add_code_sample_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
logging,
replace_return_docstrings,
)
from ...utils.backbone_utils import BackboneMixin
from .configuration_resnet import ResNetConfig
logger = logging.get_logger(__name__)
# General docstring
_CONFIG_FOR_DOC = 'ResNetConfig'
# Base docstring
_CHECKPOINT_FOR_DOC = 'microsoft/resnet-50'
_EXPECTED_OUTPUT_SHAPE = [1, 2048, 7, 7]
# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = 'microsoft/resnet-50'
_IMAGE_CLASS_EXPECTED_OUTPUT = 'tiger cat'
RESNET_PRETRAINED_MODEL_ARCHIVE_LIST = [
    'microsoft/resnet-50',
    # See all resnet models at https://huggingface.co/models?filter=resnet
]
class ResNetConvLayer( nn.Module ):
    def __init__( self , in_channels: int , out_channels: int , kernel_size: int = 3 , stride: int = 1 , activation: str = "relu" ) -> None:
        super().__init__()
        self.convolution = nn.Conv2d(
            in_channels , out_channels , kernel_size=kernel_size , stride=stride , padding=kernel_size // 2 , bias=False )
        self.normalization = nn.BatchNorm2d(out_channels )
        self.activation = ACT2FN[activation] if activation is not None else nn.Identity()
    def forward( self , input: Tensor ) -> Tensor:
        hidden_state = self.convolution(input )
        hidden_state = self.normalization(hidden_state )
        hidden_state = self.activation(hidden_state )
        return hidden_state
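# Shape sketch (illustrative, not part of the model): with in_channels=3,
# out_channels=64, kernel_size=7 and stride=2, the padding of kernel_size // 2
# gives "same"-style behaviour, so (1, 3, 224, 224) -> (1, 64, 112, 112).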
class ResNetEmbeddings( nn.Module ):
    def __init__( self , config: ResNetConfig ) -> None:
        super().__init__()
        self.embedder = ResNetConvLayer(
            config.num_channels , config.embedding_size , kernel_size=7 , stride=2 , activation=config.hidden_act )
        self.pooler = nn.MaxPool2d(kernel_size=3 , stride=2 , padding=1 )
        self.num_channels = config.num_channels
    def forward( self , pixel_values: Tensor ) -> Tensor:
        num_channels = pixel_values.shape[1]
        if num_channels != self.num_channels:
            raise ValueError(
                """Make sure that the channel dimension of the pixel values match with the one set in the configuration.""" )
        embedding = self.embedder(pixel_values )
        embedding = self.pooler(embedding )
        return embedding
class ResNetShortCut( nn.Module ):
    def __init__( self , in_channels: int , out_channels: int , stride: int = 2 ) -> None:
        super().__init__()
        self.convolution = nn.Conv2d(in_channels , out_channels , kernel_size=1 , stride=stride , bias=False )
        self.normalization = nn.BatchNorm2d(out_channels )
    def forward( self , input: Tensor ) -> Tensor:
        hidden_state = self.convolution(input )
        hidden_state = self.normalization(hidden_state )
        return hidden_state
class ResNetBasicLayer( nn.Module ):
    def __init__( self , in_channels: int , out_channels: int , stride: int = 1 , activation: str = "relu" ) -> None:
        super().__init__()
        should_apply_shortcut = in_channels != out_channels or stride != 1
        self.shortcut = (
            ResNetShortCut(in_channels , out_channels , stride=stride ) if should_apply_shortcut else nn.Identity()
        )
        self.layer = nn.Sequential(
            ResNetConvLayer(in_channels , out_channels , stride=stride ) , ResNetConvLayer(out_channels , out_channels , activation=None ) , )
        self.activation = ACT2FN[activation]
    def forward( self , hidden_state ):
        residual = hidden_state
        hidden_state = self.layer(hidden_state )
        residual = self.shortcut(residual )
        hidden_state += residual
        hidden_state = self.activation(hidden_state )
        return hidden_state
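# The basic layer above is the classic ResNet residual block:
# output = activation(layer(x) + shortcut(x)), where the shortcut is a strided
# 1x1 convolution + batch norm only when the spatial size or channel count
# changes, and the identity otherwise.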
class ResNetBottleNeckLayer( nn.Module ):
    def __init__( self , in_channels: int , out_channels: int , stride: int = 1 , activation: str = "relu" , reduction: int = 4 ) -> None:
        super().__init__()
        should_apply_shortcut = in_channels != out_channels or stride != 1
        reduces_channels = out_channels // reduction
        self.shortcut = (
            ResNetShortCut(in_channels , out_channels , stride=stride ) if should_apply_shortcut else nn.Identity()
        )
        self.layer = nn.Sequential(
            ResNetConvLayer(in_channels , reduces_channels , kernel_size=1 ) , ResNetConvLayer(reduces_channels , reduces_channels , stride=stride ) , ResNetConvLayer(reduces_channels , out_channels , kernel_size=1 , activation=None ) , )
        self.activation = ACT2FN[activation]
    def forward( self , hidden_state ):
        residual = hidden_state
        hidden_state = self.layer(hidden_state )
        residual = self.shortcut(residual )
        hidden_state += residual
        hidden_state = self.activation(hidden_state )
        return hidden_state
class ResNetStage( nn.Module ):
    def __init__( self , config: ResNetConfig , in_channels: int , out_channels: int , stride: int = 2 , depth: int = 2 , ) -> None:
        super().__init__()
        layer = ResNetBottleNeckLayer if config.layer_type == """bottleneck""" else ResNetBasicLayer
        self.layers = nn.Sequential(
            # downsampling is done in the first layer with stride of 2
            layer(in_channels , out_channels , stride=stride , activation=config.hidden_act ) , *[layer(out_channels , out_channels , activation=config.hidden_act ) for _ in range(depth - 1 )] , )
    def forward( self , input: Tensor ) -> Tensor:
        hidden_state = input
        for layer in self.layers:
            hidden_state = layer(hidden_state )
        return hidden_state
class ResNetEncoder( nn.Module ):
    def __init__( self , config: ResNetConfig ) -> None:
        super().__init__()
        self.stages = nn.ModuleList([] )
        # based on `downsample_in_first_stage` the first layer of the first stage may or may not downsample the input
        self.stages.append(
            ResNetStage(
                config , config.embedding_size , config.hidden_sizes[0] , stride=2 if config.downsample_in_first_stage else 1 , depth=config.depths[0] , ) )
        in_out_channels = zip(config.hidden_sizes , config.hidden_sizes[1:] )
        for (in_channels, out_channels), depth in zip(in_out_channels , config.depths[1:] ):
            self.stages.append(ResNetStage(config , in_channels , out_channels , depth=depth ) )
    def forward( self , hidden_state: Tensor , output_hidden_states: bool = False , return_dict: bool = True ) -> BaseModelOutputWithNoAttention:
        hidden_states = () if output_hidden_states else None
        for stage_module in self.stages:
            if output_hidden_states:
                hidden_states = hidden_states + (hidden_state,)
            hidden_state = stage_module(hidden_state )
        if output_hidden_states:
            hidden_states = hidden_states + (hidden_state,)
        if not return_dict:
            return tuple(v for v in [hidden_state, hidden_states] if v is not None )
        return BaseModelOutputWithNoAttention(
            last_hidden_state=hidden_state , hidden_states=hidden_states , )
class ResNetPreTrainedModel( PreTrainedModel ):
    config_class = ResNetConfig
    base_model_prefix = '''resnet'''
    main_input_name = '''pixel_values'''
    supports_gradient_checkpointing = True
    def _init_weights( self , module ):
        if isinstance(module , nn.Conv2d ):
            nn.init.kaiming_normal_(module.weight , mode="""fan_out""" , nonlinearity="""relu""" )
        elif isinstance(module , (nn.BatchNorm2d, nn.GroupNorm) ):
            nn.init.constant_(module.weight , 1 )
            nn.init.constant_(module.bias , 0 )
    def _set_gradient_checkpointing( self , module , value=False ):
        if isinstance(module , ResNetEncoder ):
            module.gradient_checkpointing = value
RESNET_START_DOCSTRING = r'\n This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it\n as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n behavior.\n\n Parameters:\n config ([`ResNetConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n'
RESNET_INPUTS_DOCSTRING = r'\n Args:\n pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n [`ConvNextImageProcessor.__call__`] for details.\n\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n'
@add_start_docstrings(
    '''The bare ResNet model outputting raw features without any specific head on top.''' , RESNET_START_DOCSTRING , )
class ResNetModel( ResNetPreTrainedModel ):
    def __init__( self , config ):
        super().__init__(config )
        self.config = config
        self.embedder = ResNetEmbeddings(config )
        self.encoder = ResNetEncoder(config )
        self.pooler = nn.AdaptiveAvgPool2d((1, 1) )
        # Initialize weights and apply final processing
        self.post_init()
    @add_start_docstrings_to_model_forward(RESNET_INPUTS_DOCSTRING )
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC , output_type=BaseModelOutputWithPoolingAndNoAttention , config_class=_CONFIG_FOR_DOC , modality="""vision""" , expected_output=_EXPECTED_OUTPUT_SHAPE , )
    def forward( self , pixel_values: Tensor , output_hidden_states: Optional[bool] = None , return_dict: Optional[bool] = None ) -> BaseModelOutputWithPoolingAndNoAttention:
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        embedding_output = self.embedder(pixel_values )
        encoder_outputs = self.encoder(
            embedding_output , output_hidden_states=output_hidden_states , return_dict=return_dict )
        last_hidden_state = encoder_outputs[0]
        pooled_output = self.pooler(last_hidden_state )
        if not return_dict:
            return (last_hidden_state, pooled_output) + encoder_outputs[1:]
        return BaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=last_hidden_state , pooler_output=pooled_output , hidden_states=encoder_outputs.hidden_states , )
@add_start_docstrings(
    '''
    ResNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
    ImageNet.
    ''' , RESNET_START_DOCSTRING , )
class ResNetForImageClassification( ResNetPreTrainedModel ):
    def __init__( self , config ):
        super().__init__(config )
        self.num_labels = config.num_labels
        self.resnet = ResNetModel(config )
        # classification head
        self.classifier = nn.Sequential(
            nn.Flatten() , nn.Linear(config.hidden_sizes[-1] , config.num_labels ) if config.num_labels > 0 else nn.Identity() , )
        # initialize weights and apply final processing
        self.post_init()
    @add_start_docstrings_to_model_forward(RESNET_INPUTS_DOCSTRING )
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=ImageClassifierOutputWithNoAttention , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
    def forward( self , pixel_values: Optional[torch.FloatTensor] = None , labels: Optional[torch.LongTensor] = None , output_hidden_states: Optional[bool] = None , return_dict: Optional[bool] = None , ) -> ImageClassifierOutputWithNoAttention:
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        outputs = self.resnet(pixel_values , output_hidden_states=output_hidden_states , return_dict=return_dict )
        pooled_output = outputs.pooler_output if return_dict else outputs[1]
        logits = self.classifier(pooled_output )
        loss = None
        if labels is not None:
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = """regression"""
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = """single_label_classification"""
                else:
                    self.config.problem_type = """multi_label_classification"""
            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze() , labels.squeeze() )
                else:
                    loss = loss_fct(logits , labels )
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits , labels )
        if not return_dict:
            output = (logits,) + outputs[2:]
            return (loss,) + output if loss is not None else output
        return ImageClassifierOutputWithNoAttention(loss=loss , logits=logits , hidden_states=outputs.hidden_states )
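# Loss selection in the forward pass above follows the standard transformers
# convention: num_labels == 1 -> MSE regression; integer labels with
# num_labels > 1 -> single-label cross-entropy; anything else (e.g. float
# multi-hot targets) -> multi-label BCE-with-logits.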
@add_start_docstrings(
    '''
    ResNet backbone, to be used with frameworks like DETR and MaskFormer.
    ''' , RESNET_START_DOCSTRING , )
class ResNetBackbone( ResNetPreTrainedModel , BackboneMixin ):
    def __init__( self , config ):
        super().__init__(config )
        super()._init_backbone(config )
        self.num_features = [config.embedding_size] + config.hidden_sizes
        self.embedder = ResNetEmbeddings(config )
        self.encoder = ResNetEncoder(config )
        # initialize weights and apply final processing
        self.post_init()
    @add_start_docstrings_to_model_forward(RESNET_INPUTS_DOCSTRING )
    @replace_return_docstrings(output_type=BackboneOutput , config_class=_CONFIG_FOR_DOC )
    def forward( self , pixel_values: Tensor , output_hidden_states: Optional[bool] = None , return_dict: Optional[bool] = None ) -> BackboneOutput:
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        embedding_output = self.embedder(pixel_values )
        outputs = self.encoder(embedding_output , output_hidden_states=True , return_dict=True )
        hidden_states = outputs.hidden_states
        feature_maps = ()
        for idx, stage in enumerate(self.stage_names ):
            if stage in self.out_features:
                feature_maps += (hidden_states[idx],)
        if not return_dict:
            output = (feature_maps,)
            if output_hidden_states:
                output += (outputs.hidden_states,)
            return output
        return BackboneOutput(
            feature_maps=feature_maps , hidden_states=outputs.hidden_states if output_hidden_states else None , attentions=None , )
| 324 |
"""simple docstring"""
import copy
import json
import os
import tempfile
from transformers import is_torch_available
from .test_configuration_utils import config_common_kwargs
class UpperCamelCase ( snake_case_ ):
def __init__( self : Union[str, Any] , UpperCAmelCase__ : Dict , UpperCAmelCase__ : int=None , UpperCAmelCase__ : Optional[Any]=True , UpperCAmelCase__ : List[str]=None , **UpperCAmelCase__ : str ) -> int:
_a : str = parent
_a : Union[str, Any] = config_class
_a : List[Any] = has_text_modality
_a : List[Any] = kwargs
_a : List[Any] = common_properties
def _lowercase ( self : int ) -> Tuple:
_a : List[str] = self.config_class(**self.inputs_dict )
_a : Dict = (
["""hidden_size""", """num_attention_heads""", """num_hidden_layers"""]
if self.common_properties is None
else self.common_properties
)
# Add common fields for text models
if self.has_text_modality:
common_properties.extend(["""vocab_size"""] )
# Test that config has the common properties as getters
for prop in common_properties:
self.parent.assertTrue(hasattr(UpperCAmelCase__ , UpperCAmelCase__ ) , msg=f"""`{prop}` does not exist""" )
# Test that config has the common properties as setter
for idx, name in enumerate(UpperCAmelCase__ ):
try:
setattr(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
self.parent.assertEqual(
getattr(UpperCAmelCase__ , UpperCAmelCase__ ) , UpperCAmelCase__ , msg=f"""`{name} value {idx} expected, but was {getattr(UpperCAmelCase__ , UpperCAmelCase__ )}""" )
except NotImplementedError:
# Some models might not be able to implement setters for common_properties
# In that case, a NotImplementedError is raised
pass
# Test if config class can be called with Config(prop_name=..)
for idx, name in enumerate(UpperCAmelCase__ ):
try:
_a : Optional[int] = self.config_class(**{name: idx} )
self.parent.assertEqual(
getattr(UpperCAmelCase__ , UpperCAmelCase__ ) , UpperCAmelCase__ , msg=f"""`{name} value {idx} expected, but was {getattr(UpperCAmelCase__ , UpperCAmelCase__ )}""" )
except NotImplementedError:
# Some models might not be able to implement setters for common_properties
# In that case, a NotImplementedError is raised
pass
def _lowercase ( self : Optional[int] ) -> Optional[Any]:
_a : Optional[Any] = self.config_class(**self.inputs_dict )
_a : List[str] = json.loads(config.to_json_string() )
for key, value in self.inputs_dict.items():
self.parent.assertEqual(obj[key] , UpperCAmelCase__ )
def _lowercase ( self : int ) -> List[str]:
_a : Optional[Any] = self.config_class(**self.inputs_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
_a : Tuple = os.path.join(UpperCAmelCase__ , """config.json""" )
config_first.to_json_file(UpperCAmelCase__ )
_a : List[str] = self.config_class.from_json_file(UpperCAmelCase__ )
self.parent.assertEqual(config_second.to_dict() , config_first.to_dict() )
def _lowercase ( self : Union[str, Any] ) -> Dict:
_a : Dict = self.config_class(**self.inputs_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
config_first.save_pretrained(UpperCAmelCase__ )
_a : Dict = self.config_class.from_pretrained(UpperCAmelCase__ )
self.parent.assertEqual(config_second.to_dict() , config_first.to_dict() )
def _lowercase ( self : Dict ) -> Tuple:
_a : List[Any] = self.config_class(**self.inputs_dict )
_a : Any = """test"""
with tempfile.TemporaryDirectory() as tmpdirname:
_a : List[Any] = os.path.join(UpperCAmelCase__ , UpperCAmelCase__ )
config_first.save_pretrained(UpperCAmelCase__ )
_a : List[Any] = self.config_class.from_pretrained(UpperCAmelCase__ , subfolder=UpperCAmelCase__ )
self.parent.assertEqual(config_second.to_dict() , config_first.to_dict() )
def _lowercase ( self : List[str] ) -> Union[str, Any]:
_a : Tuple = self.config_class(**self.inputs_dict , num_labels=5 )
self.parent.assertEqual(len(config.idalabel ) , 5 )
self.parent.assertEqual(len(config.labelaid ) , 5 )
_a : Union[str, Any] = 3
self.parent.assertEqual(len(config.idalabel ) , 3 )
self.parent.assertEqual(len(config.labelaid ) , 3 )
def _lowercase ( self : Tuple ) -> List[str]:
if self.config_class.is_composition:
return
_a : str = self.config_class()
self.parent.assertIsNotNone(UpperCAmelCase__ )
def _lowercase ( self : List[Any] ) -> Optional[Any]:
_a : Dict = copy.deepcopy(UpperCAmelCase__ )
_a : Any = self.config_class(**UpperCAmelCase__ )
_a : str = []
for key, value in config_common_kwargs.items():
if key == "torch_dtype":
if not is_torch_available():
continue
else:
import torch
if config.torch_dtype != torch.floataa:
wrong_values.append(("""torch_dtype""", config.torch_dtype, torch.floataa) )
elif getattr(UpperCAmelCase__ , UpperCAmelCase__ ) != value:
wrong_values.append((key, getattr(UpperCAmelCase__ , UpperCAmelCase__ ), value) )
if len(UpperCAmelCase__ ) > 0:
_a : List[Any] = """\n""".join([f"""- {v[0]}: got {v[1]} instead of {v[2]}""" for v in wrong_values] )
raise ValueError(f"""The following keys were not properly set in the config:\n{errors}""" )
def _lowercase ( self : int ) -> Union[str, Any]:
self.create_and_test_config_common_properties()
self.create_and_test_config_to_json_string()
self.create_and_test_config_to_json_file()
self.create_and_test_config_from_and_save_pretrained()
self.create_and_test_config_from_and_save_pretrained_subfolder()
self.create_and_test_config_with_num_labels()
self.check_config_can_be_init_without_params()
self.check_config_arguments_init()
| 324 | 1 |
import argparse
import torch
from transformers import FunnelBaseModel, FunnelConfig, FunnelModel, load_tf_weights_in_funnel
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch( tf_checkpoint_path , config_file , pytorch_dump_path , base_model ):
    '''simple docstring'''
    config = FunnelConfig.from_json_file(config_file)
    print(f'Building PyTorch model from configuration: {config}' )
    model = FunnelBaseModel(config) if base_model else FunnelModel(config)
    # Load weights from tf checkpoint
    load_tf_weights_in_funnel(model , config , tf_checkpoint_path)
    # Save pytorch-model
    print(f'Save PyTorch model to {pytorch_dump_path}' )
    torch.save(model.state_dict() , pytorch_dump_path)
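# Example invocation (script name and paths are placeholders):
#   python convert_funnel_original_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path /path/to/model.ckpt \
#       --config_file /path/to/config.json \
#       --pytorch_dump_path /path/to/pytorch_model.bin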
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--config_file',
default=None,
type=str,
required=True,
help='The config json file corresponding to the pre-trained model. \nThis specifies the model architecture.',
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
parser.add_argument(
'--base_model', action='store_true', help='Whether you want just the base model (no decoder) or not.'
)
    args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path, args.base_model
)
| 12 |
import unittest
from transformers import BigBirdConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax
from transformers.models.big_bird.modeling_flax_big_bird import (
FlaxBigBirdForCausalLM,
FlaxBigBirdForMaskedLM,
FlaxBigBirdForMultipleChoice,
FlaxBigBirdForPreTraining,
FlaxBigBirdForQuestionAnswering,
FlaxBigBirdForSequenceClassification,
FlaxBigBirdForTokenClassification,
FlaxBigBirdModel,
)
class FlaxBigBirdModelTester(unittest.TestCase):
    def __init__( self, parent, batch_size=2, seq_length=56, is_training=True, use_attention_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=2, intermediate_size=7, hidden_act="gelu_new", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_choices=4, attention_type="block_sparse", use_bias=True, rescale_embeddings=False, block_size=2, num_random_blocks=3, ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices
        self.rescale_embeddings = rescale_embeddings
        self.attention_type = attention_type
        self.use_bias = use_bias
        self.block_size = block_size
        self.num_random_blocks = num_random_blocks
def lowerCAmelCase__ ( self: int ):
__lowerCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__lowerCamelCase = None
if self.use_attention_mask:
__lowerCamelCase = random_attention_mask([self.batch_size, self.seq_length] )
__lowerCamelCase = None
if self.use_token_type_ids:
__lowerCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__lowerCamelCase = BigBirdConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=UpperCamelCase_ , initializer_range=self.initializer_range , attention_type=self.attention_type , block_size=self.block_size , num_random_blocks=self.num_random_blocks , use_bias=self.use_bias , rescale_embeddings=self.rescale_embeddings , )
return config, input_ids, token_type_ids, attention_mask
def lowerCAmelCase__ ( self: Dict ):
__lowerCamelCase = self.prepare_config_and_inputs()
__lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase = config_and_inputs
__lowerCamelCase = {
"""input_ids""": input_ids,
"""token_type_ids""": token_type_ids,
"""attention_mask""": attention_mask,
}
return config, inputs_dict
@require_flax
class FlaxBigBirdModelTest(FlaxModelTesterMixin, unittest.TestCase):
    all_model_classes = (
(
FlaxBigBirdForCausalLM,
FlaxBigBirdModel,
FlaxBigBirdForPreTraining,
FlaxBigBirdForMaskedLM,
FlaxBigBirdForMultipleChoice,
FlaxBigBirdForQuestionAnswering,
FlaxBigBirdForSequenceClassification,
FlaxBigBirdForTokenClassification,
)
if is_flax_available()
else ()
)
    test_attn_probs = False
    test_mismatched_shapes = False

    def setUp(self):
        self.model_tester = FlaxBigBirdModelTester(self)
@slow
# copied from `test_modeling_flax_common` because it takes much longer than other models
    def test_from_pretrained_save_pretrained(self):
        super().test_from_pretrained_save_pretrained()

    @slow
    # copied from `test_modeling_flax_common` because it takes much longer than other models
    def test_from_pretrained_with_no_automatic_init(self):
        super().test_from_pretrained_with_no_automatic_init()

    @slow
    # copied from `test_modeling_flax_common` because it takes much longer than other models
    def test_no_automatic_init(self):
        super().test_no_automatic_init()

    @slow
    # copied from `test_modeling_flax_common` because it takes much longer than other models
    def test_hidden_states_output(self):
        super().test_hidden_states_output()

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("google/bigbird-roberta-base")
            self.assertIsNotNone(model)

    def test_attention_outputs(self):
        if self.test_attn_probs:
            super().test_attention_outputs()
@slow
# copied from `test_modeling_flax_common` because it takes much longer than other models
    def test_jit_compilation(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)

                @jax.jit
                def model_jitted(input_ids, attention_mask=None, **kwargs):
                    return model(input_ids=input_ids, attention_mask=attention_mask, **kwargs)

                with self.subTest("JIT Enabled"):
                    jitted_outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)

    def check_pt_flax_outputs(self, fx_outputs, pt_outputs, model_class, tol=1e-5, name="outputs", attributes=None):
        # `bigbird_block_sparse_attention` in `FlaxBigBird` returns `attention_probs = None`, while in PyTorch version,
        # an effort was done to return `attention_probs` (yet to be verified).
        if name.startswith("outputs.attentions"):
            return
        else:
            super().check_pt_flax_outputs(fx_outputs, pt_outputs, model_class, tol, name, attributes)
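
# Minimal standalone sketch of the jit-consistency check exercised by
# `test_jit_compilation` above, on a toy function rather than a real model (shapes and
# values are illustrative only):
#
#   import jax
#   import jax.numpy as jnp
#
#   @jax.jit
#   def f(x):
#       return (x * 2).sum()
#
#   x = jnp.ones((2, 3))
#   with jax.disable_jit():
#       eager_out = f(x)
#   assert f(x).shape == eager_out.shape  # jitted and eager paths must agree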
| 12 | 1 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.model"}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"google/rembert": "https://huggingface.co/google/rembert/resolve/main/sentencepiece.model",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"google/rembert": 256,
}
class RemBertTokenizer(PreTrainedTokenizer):
    """Constructs a RemBERT tokenizer based on SentencePiece."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__(self, vocab_file, do_lower_case=False, remove_space=True, keep_accents=True, bos_token="[CLS]", eos_token="[SEP]", unk_token="[UNK]", sep_token="[SEP]", pad_token="[PAD]", cls_token="[CLS]", mask_token="[MASK]", **kwargs):
        super().__init__(do_lower_case=do_lower_case, remove_space=remove_space, keep_accents=keep_accents, bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, **kwargs)
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor()
        self.sp_model.Load(vocab_file)
    @property
    def vocab_size(self):
        return len(self.sp_model)

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        state = self.__dict__.copy()
        state['sp_model'] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        self.sp_model = spm.SentencePieceProcessor()
        self.sp_model.Load(self.vocab_file)

    def _tokenize(self, text, sample=False):
        pieces = self.sp_model.EncodeAsPieces(text)
        return pieces

    def _convert_token_to_id(self, token):
        return self.sp_model.PieceToId(token)

    def _convert_id_to_token(self, index):
        return self.sp_model.IdToPiece(index)

    def convert_tokens_to_string(self, tokens):
        out_string = self.sp_model.decode_pieces(tokens)
        return out_string
    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False) -> List[int]:
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    'You should not supply a second sequence if the provided sequence of '
                    'ids is already formatted with special tokens for the model.')
            return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_0]

        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error('Vocabulary path ({}) should be a directory'.format(save_directory))
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
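
# Sketch of the sequence layouts the methods above produce (token contents are
# illustrative; ids come from the loaded SentencePiece vocabulary):
#
#   single sequence:    [CLS] X [SEP]
#   pair of sequences:  [CLS] A [SEP] B [SEP]
#
# with token_type_ids 0 over `[CLS] A [SEP]` and 1 over `B [SEP]` in the pair case.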
| 350 |
import argparse
import collections
import torch
from flax import traverse_util
from t5x import checkpoints
from transformers import T5Config, T5EncoderModel, T5ForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
def t5x_attention_lookup(params, i, prefix, layer_name="attention"):
    """Returns the KOQV parameters of (self-)attention. Does not transpose."""
    k = params[f'{prefix}/layers_{i}/{layer_name}/key/kernel']
    o = params[f'{prefix}/layers_{i}/{layer_name}/out/kernel']
    q = params[f'{prefix}/layers_{i}/{layer_name}/query/kernel']
    v = params[f'{prefix}/layers_{i}/{layer_name}/value/kernel']
    return k, o, q, v
def t5x_mlp_lookup(params, i, prefix, split_mlp_wi=False):
    """Returns the MLP parameters of a layer. Does not transpose."""
    if split_mlp_wi:
        wi_0 = params[f'{prefix}/layers_{i}/mlp/wi_0/kernel']
        wi_1 = params[f'{prefix}/layers_{i}/mlp/wi_1/kernel']
        wi = (wi_0, wi_1)
    else:
        wi = params[f'{prefix}/layers_{i}/mlp/wi/kernel']
    wo = params[f'{prefix}/layers_{i}/mlp/wo/kernel']
    return wi, wo


def t5x_layer_norm_lookup(params, i, prefix, layer_name):
    """Returns the layer norm param of a layer."""
    return params[f'{prefix}/layers_{i}/{layer_name}/scale']
def convert_t5x_to_pytorch(variables: dict, *, num_layers: int, is_encoder_only: bool):
    """Converts the parameters from T5X-Flax to Transformers-PyTorch."""
    old = traverse_util.flatten_dict(variables['target'])
    old = {'/'.join(k): v for k, v in old.items()}

    # v1.1 models have a gated GeLU with wi_0 and wi_1 instead of wi
    split_mlp_wi = 'encoder/layers_0/mlp/wi_0/kernel' in old
    print('Split MLP:', split_mlp_wi)

    new = collections.OrderedDict()

    # Shared embeddings.
    new['shared.weight'] = old['token_embedder/embedding']

    # Encoder.
    for i in range(num_layers):
        # Block i, layer 0 (Self Attention).
        layer_norm = t5x_layer_norm_lookup(old, i, 'encoder', 'pre_attention_layer_norm')
        k, o, q, v = t5x_attention_lookup(old, i, 'encoder', 'attention')
        new[f'encoder.block.{i}.layer.0.layer_norm.weight'] = layer_norm
        new[f'encoder.block.{i}.layer.0.SelfAttention.k.weight'] = k.T
        new[f'encoder.block.{i}.layer.0.SelfAttention.o.weight'] = o.T
        new[f'encoder.block.{i}.layer.0.SelfAttention.q.weight'] = q.T
        new[f'encoder.block.{i}.layer.0.SelfAttention.v.weight'] = v.T

        # Block i, layer 1 (MLP).
        layer_norm = t5x_layer_norm_lookup(old, i, 'encoder', 'pre_mlp_layer_norm')
        wi, wo = t5x_mlp_lookup(old, i, 'encoder', split_mlp_wi)
        new[f'encoder.block.{i}.layer.1.layer_norm.weight'] = layer_norm
        if split_mlp_wi:
            new[f'encoder.block.{i}.layer.1.DenseReluDense.wi_0.weight'] = wi[0].T
            new[f'encoder.block.{i}.layer.1.DenseReluDense.wi_1.weight'] = wi[1].T
        else:
            new[f'encoder.block.{i}.layer.1.DenseReluDense.wi.weight'] = wi.T
        new[f'encoder.block.{i}.layer.1.DenseReluDense.wo.weight'] = wo.T

    new['encoder.block.0.layer.0.SelfAttention.relative_attention_bias.weight'] = old[
        'encoder/relpos_bias/rel_embedding'
    ].T
    new['encoder.final_layer_norm.weight'] = old['encoder/encoder_norm/scale']

    if not is_encoder_only:
        # Decoder.
        for i in range(num_layers):
            # Block i, layer 0 (Self Attention).
            layer_norm = t5x_layer_norm_lookup(old, i, 'decoder', 'pre_self_attention_layer_norm')
            k, o, q, v = t5x_attention_lookup(old, i, 'decoder', 'self_attention')
            new[f'decoder.block.{i}.layer.0.layer_norm.weight'] = layer_norm
            new[f'decoder.block.{i}.layer.0.SelfAttention.k.weight'] = k.T
            new[f'decoder.block.{i}.layer.0.SelfAttention.o.weight'] = o.T
            new[f'decoder.block.{i}.layer.0.SelfAttention.q.weight'] = q.T
            new[f'decoder.block.{i}.layer.0.SelfAttention.v.weight'] = v.T

            # Block i, layer 1 (Cross Attention).
            layer_norm = t5x_layer_norm_lookup(old, i, 'decoder', 'pre_cross_attention_layer_norm')
            k, o, q, v = t5x_attention_lookup(old, i, 'decoder', 'encoder_decoder_attention')
            new[f'decoder.block.{i}.layer.1.layer_norm.weight'] = layer_norm
            new[f'decoder.block.{i}.layer.1.EncDecAttention.k.weight'] = k.T
            new[f'decoder.block.{i}.layer.1.EncDecAttention.o.weight'] = o.T
            new[f'decoder.block.{i}.layer.1.EncDecAttention.q.weight'] = q.T
            new[f'decoder.block.{i}.layer.1.EncDecAttention.v.weight'] = v.T

            # Block i, layer 2 (MLP).
            layer_norm = t5x_layer_norm_lookup(old, i, 'decoder', 'pre_mlp_layer_norm')
            wi, wo = t5x_mlp_lookup(old, i, 'decoder', split_mlp_wi)
            new[f'decoder.block.{i}.layer.2.layer_norm.weight'] = layer_norm
            if split_mlp_wi:
                new[f'decoder.block.{i}.layer.2.DenseReluDense.wi_0.weight'] = wi[0].T
                new[f'decoder.block.{i}.layer.2.DenseReluDense.wi_1.weight'] = wi[1].T
            else:
                new[f'decoder.block.{i}.layer.2.DenseReluDense.wi.weight'] = wi.T
            new[f'decoder.block.{i}.layer.2.DenseReluDense.wo.weight'] = wo.T

        new['decoder.final_layer_norm.weight'] = old['decoder/decoder_norm/scale']
        new['decoder.block.0.layer.0.SelfAttention.relative_attention_bias.weight'] = old[
            'decoder/relpos_bias/rel_embedding'
        ].T

        # LM Head (only in v1.1 checkpoints, in v1.0 embeddings are used instead)
        if "decoder/logits_dense/kernel" in old:
            new['lm_head.weight'] = old['decoder/logits_dense/kernel'].T

    return new
def make_state_dict(converted_params, is_encoder_only: bool):
    """Prepares a state dict for the PyTorch model."""
    state_dict = collections.OrderedDict([(k, torch.from_numpy(v.copy())) for (k, v) in converted_params.items()])

    # Add what is missing.
    if "encoder.embed_tokens.weight" not in state_dict:
        state_dict['encoder.embed_tokens.weight'] = state_dict['shared.weight']

    if not is_encoder_only:
        if "decoder.embed_tokens.weight" not in state_dict:
            state_dict['decoder.embed_tokens.weight'] = state_dict['shared.weight']

        if "lm_head.weight" not in state_dict:  # For old 1.0 models.
            print('Using shared word embeddings as lm_head.')
            state_dict['lm_head.weight'] = state_dict['shared.weight']

    return state_dict
def load_t5x_weights_in_t5(model, config, t5x_checkpoint_path, is_encoder_only):
    """Replaces the params in model with the T5X converted params."""
    variables = checkpoints.load_t5x_checkpoint(t5x_checkpoint_path)
    converted = convert_t5x_to_pytorch(variables, num_layers=config.num_layers, is_encoder_only=is_encoder_only)
    state_dict = make_state_dict(converted, is_encoder_only)
    model.load_state_dict(state_dict, strict=True)
def convert_t5x_checkpoint_to_pytorch(t5x_checkpoint_path, config_file, pytorch_dump_path, is_encoder_only: bool = False):
    """Loads the config and model, converts the T5X checkpoint, and saves a PyTorch checkpoint."""
    # Initialise PyTorch model
    config = T5Config.from_json_file(config_file)
    print(f'Building PyTorch model from configuration: {config}')
    # Non-v1.1 checkpoints could also use T5Model, but this works for all.
    # The v1.0 checkpoints will simply have an LM head that is the word embeddings.
    if is_encoder_only:
        model = T5EncoderModel(config)
    else:
        model = T5ForConditionalGeneration(config)

    # Load weights from tf checkpoint
    load_t5x_weights_in_t5(model, config, t5x_checkpoint_path, is_encoder_only)

    # Save pytorch-model
    print(f'Save PyTorch model to {pytorch_dump_path}')
    model.save_pretrained(pytorch_dump_path)

    # Verify that we can load the checkpoint.
    model.from_pretrained(pytorch_dump_path)
    print('Done')
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Converts a native T5X checkpoint into a PyTorch checkpoint.")
# Required parameters
parser.add_argument(
"--t5x_checkpoint_path", default=None, type=str, required=True, help="Path to the T5X checkpoint."
)
parser.add_argument(
"--config_file",
default=None,
type=str,
required=True,
help="The config json file corresponding to the pre-trained T5 model.\nThis specifies the model architecture.",
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
parser.add_argument(
"--is_encoder_only", action="store_true", help="Check if the model is encoder-decoder model", default=False
)
    args = parser.parse_args()
    convert_t5x_checkpoint_to_pytorch(
        args.t5x_checkpoint_path, args.config_file, args.pytorch_dump_path, args.is_encoder_only
    )
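
# Sketch of the key layout `convert_t5x_to_pytorch` consumes (the concrete key is
# illustrative, not read from a real checkpoint): `traverse_util.flatten_dict` turns the
# nested param tree {"encoder": {"layers_0": {"attention": {"key": {"kernel": ...}}}}}
# into tuple keys, and the "/".join step flattens those into strings such as
# "encoder/layers_0/attention/key/kernel", which the f-string lookups above index into.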
| 282 | 0 |
def kinetic_energy(mass: float, velocity: float) -> float:
    """Return the kinetic energy (0.5 * m * v^2) of a body, in joules."""
    if mass < 0:
        raise ValueError("The mass of a body cannot be negative")
    return 0.5 * mass * abs(velocity) * abs(velocity)
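
# Worked example (illustrative numbers): a 10 kg body moving at 10 m/s carries
# 0.5 * 10 * 10**2 = 500.0 J, i.e. kinetic_energy(10, 10) == 500.0.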
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
| 187 |
import itertools
import json
import linecache
import os
import pickle
import re
import socket
import string
from collections import Counter
from logging import getLogger
from pathlib import Path
from typing import Callable, Dict, Iterable, List
import git
import torch
from torch.utils.data import Dataset
from transformers import BartTokenizer, RagTokenizer, T5Tokenizer
def encode_line(tokenizer, line, max_length, padding_side, pad_to_max_length=True, return_tensors="pt"):
    extra_kw = {"add_prefix_space": True} if isinstance(tokenizer, BartTokenizer) and not line.startswith(" ") else {}
    tokenizer.padding_side = padding_side
    return tokenizer(
        [line], max_length=max_length, padding="max_length" if pad_to_max_length else None, truncation=True, return_tensors=return_tensors, add_special_tokens=True, **extra_kw, )
def trim_batch(input_ids, pad_token_id, attention_mask=None, ):
    """Remove columns that are populated exclusively by pad_token_id."""
    keep_column_mask = input_ids.ne(pad_token_id).any(dim=0)
    if attention_mask is None:
        return input_ids[:, keep_column_mask]
    else:
        return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask])
class Seq2SeqDataset(Dataset):
    """A dataset that reads line-separated source/target files for seq2seq training."""

    def __init__(self, tokenizer, data_dir, max_source_length, max_target_length, type_path="train", n_obs=None, src_lang=None, tgt_lang=None, prefix="", ):
        super().__init__()
        self.src_file = Path(data_dir).joinpath(type_path + ".source")
        self.tgt_file = Path(data_dir).joinpath(type_path + ".target")
        self.src_lens = self.get_char_lens(self.src_file)
        self.max_source_length = max_source_length
        self.max_target_length = max_target_length
        assert min(self.src_lens) > 0, f"found empty line in {self.src_file}"
        self.tokenizer = tokenizer
        self.prefix = prefix
        if n_obs is not None:
            self.src_lens = self.src_lens[:n_obs]
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
def __len__( self : List[Any] ):
"""simple docstring"""
return len(self.src_lens )
    def __getitem__(self, index) -> Dict[str, torch.Tensor]:
        index = index + 1  # linecache starts at 1
        source_line = self.prefix + linecache.getline(str(self.src_file), index).rstrip("\n")
        tgt_line = linecache.getline(str(self.tgt_file), index).rstrip("\n")
        assert source_line, f"empty source line for index {index}"
        assert tgt_line, f"empty tgt line for index {index}"

        # Need to add eos token manually for T5
        if isinstance(self.tokenizer, T5Tokenizer):
            source_line += self.tokenizer.eos_token
            tgt_line += self.tokenizer.eos_token

        # Pad source and target to the right
        source_tokenizer = (
            self.tokenizer.question_encoder if isinstance(self.tokenizer, RagTokenizer) else self.tokenizer
        )
        target_tokenizer = self.tokenizer.generator if isinstance(self.tokenizer, RagTokenizer) else self.tokenizer

        source_inputs = encode_line(source_tokenizer, source_line, self.max_source_length, "right")
        target_inputs = encode_line(target_tokenizer, tgt_line, self.max_target_length, "right")

        source_ids = source_inputs["input_ids"].squeeze()
        target_ids = target_inputs["input_ids"].squeeze()
        src_mask = source_inputs["attention_mask"].squeeze()
        return {
            "input_ids": source_ids,
            "attention_mask": src_mask,
            "decoder_input_ids": target_ids,
        }
    @staticmethod
    def get_char_lens(data_file):
        return [len(x) for x in Path(data_file).open().readlines()]

    def collate_fn(self, batch) -> Dict[str, torch.Tensor]:
        input_ids = torch.stack([x["input_ids"] for x in batch])
        masks = torch.stack([x["attention_mask"] for x in batch])
        target_ids = torch.stack([x["decoder_input_ids"] for x in batch])
        tgt_pad_token_id = (
            self.tokenizer.generator.pad_token_id
            if isinstance(self.tokenizer, RagTokenizer)
            else self.tokenizer.pad_token_id
        )
        src_pad_token_id = (
            self.tokenizer.question_encoder.pad_token_id
            if isinstance(self.tokenizer, RagTokenizer)
            else self.tokenizer.pad_token_id
        )
        y = trim_batch(target_ids, tgt_pad_token_id)
        source_ids, source_mask = trim_batch(input_ids, src_pad_token_id, attention_mask=masks)
        batch = {
            "input_ids": source_ids,
            "attention_mask": source_mask,
            "decoder_input_ids": y,
        }
        return batch
logger = getLogger(__name__)


def flatten_list(summary_ids: List[List]):
    return list(itertools.chain.from_iterable(summary_ids))


def save_git_info(folder_path: str) -> None:
    """Save git information to folder_path/git_log.json."""
    repo_infos = get_git_info()
    save_json(repo_infos, os.path.join(folder_path, "git_log.json"))


def save_json(content, path, indent=4, **json_dump_kwargs):
    with open(path, "w") as f:
        json.dump(content, f, indent=indent, **json_dump_kwargs)


def load_json(path):
    with open(path) as f:
        return json.load(f)


def get_git_info():
    repo = git.Repo(search_parent_directories=True)
    repo_infos = {
        "repo_id": str(repo),
        "repo_sha": str(repo.head.object.hexsha),
        "repo_branch": str(repo.active_branch),
        "hostname": str(socket.gethostname()),
    }
    return repo_infos


def lmap(f: Callable, x: Iterable) -> List:
    """list(map(f, x))"""
    return list(map(f, x))


def pickle_save(obj, path):
    """pickle.dump(obj, path)"""
    with open(path, "wb") as f:
        return pickle.dump(obj, f)
def normalize_answer(s):
    """Lower text and remove punctuation, articles and extra whitespace."""

    def remove_articles(text):
        return re.sub(r"\b(a|an|the)\b", " ", text)

    def white_space_fix(text):
        return " ".join(text.split())

    def remove_punc(text):
        exclude = set(string.punctuation)
        return "".join(ch for ch in text if ch not in exclude)

    def lower(text):
        return text.lower()

    return white_space_fix(remove_articles(remove_punc(lower(s))))


def f1_score(prediction, ground_truth):
    prediction_tokens = normalize_answer(prediction).split()
    ground_truth_tokens = normalize_answer(ground_truth).split()
    common = Counter(prediction_tokens) & Counter(ground_truth_tokens)
    num_same = sum(common.values())
    if num_same == 0:
        return 0
    precision = 1.0 * num_same / len(prediction_tokens)
    recall = 1.0 * num_same / len(ground_truth_tokens)
    f1 = (2 * precision * recall) / (precision + recall)
    return f1


def exact_match_score(prediction, ground_truth):
    return normalize_answer(prediction) == normalize_answer(ground_truth)


def calculate_exact_match(output_lns: List[str], reference_lns: List[str]) -> Dict:
    assert len(output_lns) == len(reference_lns)
    em = 0
    for hypo, pred in zip(output_lns, reference_lns):
        em += exact_match_score(hypo, pred)
    if len(output_lns) > 0:
        em /= len(output_lns)
    return {"em": em}


def is_rag_model(model_prefix):
    return model_prefix.startswith("rag")
def set_extra_model_params(extra_params, hparams, config):
    equivalent_param = {p: p for p in extra_params}
    # T5 models don't have `dropout` param, they have `dropout_rate` instead
    equivalent_param["dropout"] = "dropout_rate"
    for p in extra_params:
        if getattr(hparams, p, None):
            if not hasattr(config, p) and not hasattr(config, equivalent_param[p]):
                logger.info("config doesn't have a `{}` attribute".format(p))
                delattr(hparams, p)
                continue
            set_p = p if hasattr(config, p) else equivalent_param[p]
            setattr(config, set_p, getattr(hparams, p))
            delattr(hparams, p)
    return hparams, config
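
# Quick sanity sketch for the metric helpers above (strings are illustrative):
#   normalize_answer("The Cat!")         -> "cat"   (case, punctuation, articles stripped)
#   f1_score("the cat sat", "a cat sat") -> 1.0     (both normalize to "cat sat")
#   exact_match_score("The Cat", "cat!") -> True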
| 187 | 1 |
from bisect import bisect
from itertools import accumulate
def frac_knapsack(vl, wt, w, n):
    """Greedy fractional knapsack: fill capacity w from items with values vl and weights wt."""
    r = sorted(zip(vl, wt), key=lambda x: x[0] / x[1], reverse=True)
    vl, wt = [i[0] for i in r], [i[1] for i in r]
    acc = list(accumulate(wt))
    k = bisect(acc, w)
return (
0
if k == 0
else sum(vl[:k] ) + (w - acc[k - 1]) * (vl[k]) / (wt[k])
if k != n
else sum(vl[:k] )
)
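
# Worked example on a classic instance: values [60, 100, 120], weights [10, 20, 30],
# capacity 50 -> take items 1 and 2 whole (value 160) plus 20/30 of item 3, so
# frac_knapsack([60, 100, 120], [10, 20, 30], 50, 3) == 240.0.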
if __name__ == "__main__":
import doctest
doctest.testmod()
 | 192 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_squeezebert import SqueezeBertTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""squeezebert/squeezebert-uncased""": (
"""https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/vocab.txt"""
),
"""squeezebert/squeezebert-mnli""": """https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/vocab.txt""",
"""squeezebert/squeezebert-mnli-headless""": (
"""https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""squeezebert/squeezebert-uncased""": (
"""https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/tokenizer.json"""
),
"""squeezebert/squeezebert-mnli""": (
"""https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/tokenizer.json"""
),
"""squeezebert/squeezebert-mnli-headless""": (
"""https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/tokenizer.json"""
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""squeezebert/squeezebert-uncased""": 512,
"""squeezebert/squeezebert-mnli""": 512,
"""squeezebert/squeezebert-mnli-headless""": 512,
}
PRETRAINED_INIT_CONFIGURATION = {
"""squeezebert/squeezebert-uncased""": {"""do_lower_case""": True},
"""squeezebert/squeezebert-mnli""": {"""do_lower_case""": True},
"""squeezebert/squeezebert-mnli-headless""": {"""do_lower_case""": True},
}
class SqueezeBertTokenizerFast(PreTrainedTokenizerFast):
    r"""Constructs a "fast" SqueezeBERT tokenizer, backed by HuggingFace's *tokenizers* library."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = SqueezeBertTokenizer

    def __init__(self, vocab_file=None, tokenizer_file=None, do_lower_case=True, unk_token="[UNK]", sep_token="[SEP]", pad_token="[PAD]", cls_token="[CLS]", mask_token="[MASK]", tokenize_chinese_chars=True, strip_accents=None, **kwargs, ):
        super().__init__(
            vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, tokenize_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents, **kwargs, )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get('lowercase', do_lower_case) != do_lower_case
            or normalizer_state.get('strip_accents', strip_accents) != strip_accents
            or normalizer_state.get('handle_chinese_chars', tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop('type'))
            normalizer_state['lowercase'] = do_lower_case
            normalizer_state['strip_accents'] = strip_accents
            normalizer_state['handle_chinese_chars'] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
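
# Sketch of the pair layout produced by the two methods above (lengths are illustrative):
#
#   tokens:          [CLS] a1 a2 [SEP] b1 [SEP]
#   token_type_ids:    0   0  0    0   1    1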
| 192 | 1 |
import json
import os
from pathlib import Path
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple, Union
import sentencepiece
from ...tokenization_utils import BatchEncoding, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"
VOCAB_FILES_NAMES = {
"""vocab_file""": """vocab.json""",
"""spm_file""": """sentencepiece.bpe.model""",
"""tokenizer_config_file""": """tokenizer_config.json""",
}
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""facebook/m2m100_418M""": """https://huggingface.co/facebook/m2m100_418M/resolve/main/vocab.json""",
"""facebook/m2m100_1.2B""": """https://huggingface.co/facebook/m2m100_1.2B/resolve/main/vocab.json""",
},
"""spm_file""": {
"""facebook/m2m100_418M""": """https://huggingface.co/facebook/m2m100_418M/resolve/main/sentencepiece.bpe.model""",
"""facebook/m2m100_1.2B""": """https://huggingface.co/facebook/m2m100_1.2B/resolve/main/sentencepiece.bpe.model""",
},
"""tokenizer_config_file""": {
"""facebook/m2m100_418M""": """https://huggingface.co/facebook/m2m100_418M/resolve/main/tokenizer_config.json""",
"""facebook/m2m100_1.2B""": """https://huggingface.co/facebook/m2m100_1.2B/resolve/main/tokenizer_config.json""",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""facebook/m2m100_418M""": 10_24,
}
# fmt: off
FAIRSEQ_LANGUAGE_CODES = {
"""m2m100""": ["""af""", """am""", """ar""", """ast""", """az""", """ba""", """be""", """bg""", """bn""", """br""", """bs""", """ca""", """ceb""", """cs""", """cy""", """da""", """de""", """el""", """en""", """es""", """et""", """fa""", """ff""", """fi""", """fr""", """fy""", """ga""", """gd""", """gl""", """gu""", """ha""", """he""", """hi""", """hr""", """ht""", """hu""", """hy""", """id""", """ig""", """ilo""", """is""", """it""", """ja""", """jv""", """ka""", """kk""", """km""", """kn""", """ko""", """lb""", """lg""", """ln""", """lo""", """lt""", """lv""", """mg""", """mk""", """ml""", """mn""", """mr""", """ms""", """my""", """ne""", """nl""", """no""", """ns""", """oc""", """or""", """pa""", """pl""", """ps""", """pt""", """ro""", """ru""", """sd""", """si""", """sk""", """sl""", """so""", """sq""", """sr""", """ss""", """su""", """sv""", """sw""", """ta""", """th""", """tl""", """tn""", """tr""", """uk""", """ur""", """uz""", """vi""", """wo""", """xh""", """yi""", """yo""", """zh""", """zu"""],
"""wmt21""": ["""en""", """ha""", """is""", """ja""", """cs""", """ru""", """zh""", """de"""]
}
class M2M100Tokenizer(PreTrainedTokenizer):
    """Construct an M2M100 tokenizer, based on SentencePiece."""

    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ['input_ids', 'attention_mask']

    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []

    def __init__(self, vocab_file, spm_file, src_lang=None, tgt_lang=None, bos_token="<s>", eos_token="</s>", sep_token="</s>", pad_token="<pad>", unk_token="<unk>", language_codes="m2m100", sp_model_kwargs: Optional[Dict[str, Any]] = None, num_madeup_words=8, **kwargs, ) -> None:
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        self.language_codes = language_codes
        fairseq_language_code = FAIRSEQ_LANGUAGE_CODES[language_codes]
        self.lang_code_to_token = {lang_code: F'''__{lang_code}__''' for lang_code in fairseq_language_code}

        kwargs["additional_special_tokens"] = kwargs.get('''additional_special_tokens''', []) or []
        kwargs["additional_special_tokens"] += [
            self.get_lang_token(lang_code)
            for lang_code in fairseq_language_code
            if self.get_lang_token(lang_code) not in kwargs["additional_special_tokens"]
        ]

        super().__init__(
            src_lang=src_lang, tgt_lang=tgt_lang, bos_token=bos_token, eos_token=eos_token, sep_token=sep_token, unk_token=unk_token, pad_token=pad_token, language_codes=language_codes, sp_model_kwargs=self.sp_model_kwargs, num_madeup_words=num_madeup_words, **kwargs, )

        self.vocab_file = vocab_file
        self.encoder = load_json(vocab_file)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.spm_file = spm_file
        self.sp_model = load_spm(spm_file, self.sp_model_kwargs)
        self.encoder_size = len(self.encoder)

        self.lang_token_to_id = {
            self.get_lang_token(lang_code): self.encoder_size + i for i, lang_code in enumerate(fairseq_language_code)
        }
        self.lang_code_to_id = {lang_code: self.encoder_size + i for i, lang_code in enumerate(fairseq_language_code)}
        self.id_to_lang_token = {v: k for k, v in self.lang_token_to_id.items()}

        self._src_lang = src_lang if src_lang is not None else '''en'''
        self.tgt_lang = tgt_lang
        self.cur_lang_id = self.get_lang_id(self._src_lang)
        self.set_src_lang_special_tokens(self._src_lang)

        self.num_madeup_words = num_madeup_words
    @property
    def vocab_size(self) -> int:
        return len(self.encoder) + len(self.lang_token_to_id)

    @property
    def src_lang(self) -> str:
        return self._src_lang

    @src_lang.setter
    def src_lang(self, new_src_lang: str) -> None:
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang)

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        if token in self.lang_token_to_id:
            return self.lang_token_to_id[token]
        return self.encoder.get(token, self.encoder[self.unk_token])

    def _convert_id_to_token(self, index: int) -> str:
        if index in self.id_to_lang_token:
            return self.id_to_lang_token[index]
        return self.decoder.get(index, self.unk_token)

    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        current_sub_tokens = []
        out_string = ''''''
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(current_sub_tokens) + token
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()
    def get_special_tokens_mask(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True)

        prefix_ones = [1] * len(self.prefix_tokens)
        suffix_ones = [1] * len(self.suffix_tokens)
        if token_ids_1 is None:
            return prefix_ones + ([0] * len(token_ids_0)) + suffix_ones
        return prefix_ones + ([0] * len(token_ids_0)) + ([0] * len(token_ids_1)) + suffix_ones

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens

    def get_vocab(self) -> Dict:
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab
    def __getstate__(self) -> Dict:
        state = self.__dict__.copy()
        state['''sp_model'''] = None
        return state

    def __setstate__(self, d: Dict) -> None:
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, '''sp_model_kwargs'''):
            self.sp_model_kwargs = {}
        self.sp_model = load_spm(self.spm_file, self.sp_model_kwargs)
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        save_dir = Path(save_directory)
        if not save_dir.is_dir():
            raise OSError(F'''{save_directory} should be a directory''')
        vocab_save_path = save_dir / (
            (filename_prefix + '''-''' if filename_prefix else '''''') + self.vocab_files_names['''vocab_file''']
        )
        spm_save_path = save_dir / (
            (filename_prefix + '''-''' if filename_prefix else '''''') + self.vocab_files_names['''spm_file''']
        )

        save_json(self.encoder, vocab_save_path)

        if os.path.abspath(self.spm_file) != os.path.abspath(spm_save_path) and os.path.isfile(self.spm_file):
            copyfile(self.spm_file, spm_save_path)
        elif not os.path.isfile(self.spm_file):
            with open(spm_save_path, '''wb''') as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (str(vocab_save_path), str(spm_save_path))
    def prepare_seq2seq_batch(self, src_texts: List[str], src_lang: str = "en", tgt_texts: Optional[List[str]] = None, tgt_lang: str = "ro", **kwargs, ) -> BatchEncoding:
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self.src_lang)
        return super().prepare_seq2seq_batch(src_texts, tgt_texts, **kwargs)

    def _build_translation_inputs(self, raw_inputs, src_lang: Optional[str], tgt_lang: Optional[str], **extra_kwargs):
        """Used by the translation pipeline to prepare inputs for the generate function."""
        if src_lang is None or tgt_lang is None:
            raise ValueError('''Translation requires a `src_lang` and a `tgt_lang` for this model''')
        self.src_lang = src_lang
        inputs = self(raw_inputs, add_special_tokens=True, **extra_kwargs)
        tgt_lang_id = self.get_lang_id(tgt_lang)
        inputs['''forced_bos_token_id'''] = tgt_lang_id
        return inputs
    def _switch_to_input_mode(self):
        self.set_src_lang_special_tokens(self.src_lang)

    def _switch_to_target_mode(self):
        self.set_tgt_lang_special_tokens(self.tgt_lang)

    def set_src_lang_special_tokens(self, src_lang: str) -> None:
        """Reset the special tokens to the source-language setting: prefix=[src lang code id], suffix=[eos]."""
        lang_token = self.get_lang_token(src_lang)
        self.cur_lang_id = self.lang_token_to_id[lang_token]
        self.prefix_tokens = [self.cur_lang_id]
        self.suffix_tokens = [self.eos_token_id]

    def set_tgt_lang_special_tokens(self, tgt_lang: str) -> None:
        """Reset the special tokens to the target-language setting: prefix=[tgt lang code id], suffix=[eos]."""
        lang_token = self.get_lang_token(tgt_lang)
        self.cur_lang_id = self.lang_token_to_id[lang_token]
        self.prefix_tokens = [self.cur_lang_id]
        self.suffix_tokens = [self.eos_token_id]

    def get_lang_token(self, lang: str) -> str:
        return self.lang_code_to_token[lang]

    def get_lang_id(self, lang: str) -> int:
        lang_token = self.get_lang_token(lang)
        return self.lang_token_to_id[lang_token]
def load_spm(path: str, sp_model_kwargs: Dict[str, Any]) -> sentencepiece.SentencePieceProcessor:
    spm = sentencepiece.SentencePieceProcessor(**sp_model_kwargs)
    spm.Load(str(path))
    return spm


def load_json(path: str) -> Union[Dict, List]:
    with open(path, '''r''') as f:
        return json.load(f)


def save_json(data, path: str) -> None:
    with open(path, '''w''') as f:
        json.dump(data, f, indent=2)
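
# Hedged usage sketch of the language-token framing set up above (language pair and
# text are illustrative):
#
#   tokenizer = M2M100Tokenizer.from_pretrained("facebook/m2m100_418M", src_lang="en", tgt_lang="fr")
#   enc = tokenizer("Hello")                     # framed as [__en__] ... </s>
#   tokenizer.set_tgt_lang_special_tokens("fr")  # framing becomes [__fr__] ... </s>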
| 259 |
import torch
from diffusers import UnCLIPScheduler
from .test_schedulers import SchedulerCommonTest
class UnCLIPSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (UnCLIPScheduler,)

    def get_scheduler_config(self, **kwargs):
        config = {
            '''num_train_timesteps''': 1000,
            '''variance_type''': '''fixed_small_log''',
            '''clip_sample''': True,
            '''clip_sample_range''': 1.0,
            '''prediction_type''': '''epsilon''',
        }

        config.update(**kwargs)
        return config
    def test_timesteps(self):
        for timesteps in [1, 5, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_variance_type(self):
        for variance in ["fixed_small_log", "learned_range"]:
            self.check_over_configs(variance_type=variance)

    def test_clip_sample(self):
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample)

    def test_clip_sample_range(self):
        for clip_sample_range in [1, 5, 10, 20]:
            self.check_over_configs(clip_sample_range=clip_sample_range)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "sample"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_time_indices(self):
        for time_step in [0, 500, 999]:
            for prev_timestep in [None, 5, 100, 250, 500, 750]:
                if prev_timestep is not None and prev_timestep >= time_step:
                    continue
                self.check_over_forward(time_step=time_step, prev_timestep=prev_timestep)
    def test_variance_fixed_small_log(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(variance_type='''fixed_small_log''')
        scheduler = scheduler_class(**scheduler_config)

        assert torch.sum(torch.abs(scheduler._get_variance(0) - 1.0000e-10)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(487) - 0.0549625)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(999) - 0.9994987)) < 1e-5

    def test_variance_learned_range(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(variance_type='''learned_range''')
        scheduler = scheduler_class(**scheduler_config)

        predicted_variance = 0.5

        assert scheduler._get_variance(1, predicted_variance=predicted_variance) - -10.1712790 < 1e-5
        assert scheduler._get_variance(487, predicted_variance=predicted_variance) - -5.7998052 < 1e-5
        assert scheduler._get_variance(999, predicted_variance=predicted_variance) - -0.0010011 < 1e-5
    def test_full_loop(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = scheduler.timesteps

        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)

        for i, t in enumerate(timesteps):
            # 1. predict noise residual
            residual = model(sample, t)

            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample

            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 252.2682495) < 1e-2
        assert abs(result_mean.item() - 0.3284743) < 1e-3

    def test_full_loop_skip_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(25)

        timesteps = scheduler.timesteps

        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)

        for i, t in enumerate(timesteps):
            # 1. predict noise residual
            residual = model(sample, t)

            if i + 1 == timesteps.shape[0]:
                prev_timestep = None
            else:
                prev_timestep = timesteps[i + 1]

            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(
                residual, t, sample, prev_timestep=prev_timestep, generator=generator).prev_sample

            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 258.2044983) < 1e-2
        assert abs(result_mean.item() - 0.3362038) < 1e-3

    def test_trained_betas(self):
        pass

    def test_add_noise_device(self):
        pass
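
# The denoising pattern the full-loop tests above exercise, in isolation (`model` here
# stands in for any epsilon-predicting callable, not a trained UNet):
#
#   scheduler = UnCLIPScheduler()
#   sample = torch.randn(1, 3, 32, 32)
#   for t in scheduler.timesteps:
#       residual = model(sample, t)
#       sample = scheduler.step(residual, t, sample).prev_sample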
| 209 | 0 |
# We ignore warnings about stepping the scheduler since we step it ourselves during gradient accumulation
import warnings
from .state import AcceleratorState, GradientState
warnings.filterwarnings('ignore', category=UserWarning, module='torch.optim.lr_scheduler')
class AcceleratedScheduler:
    """
    A wrapper around a learning rate scheduler that only steps when the optimizer(s) actually
    took a training step, to avoid advancing the schedule on skipped steps (e.g. gradient
    overflow in mixed-precision training).
    """

    def __init__(self, scheduler, optimizers, step_with_optimizer: bool = True, split_batches: bool = False):
        self.scheduler = scheduler
        self.optimizers = optimizers if isinstance(optimizers, (list, tuple)) else [optimizers]
        self.split_batches = split_batches
        self.step_with_optimizer = step_with_optimizer
        self.gradient_state = GradientState()
    def step(self, *args, **kwargs):
        if not self.step_with_optimizer:
            # No link between scheduler and optimizer -> just step
            self.scheduler.step(*args, **kwargs)
            return

        # Otherwise, first make sure the optimizer was stepped.
        if not self.gradient_state.sync_gradients:
            if self.gradient_state.adjust_scheduler:
                self.scheduler._step_count += 1
            return

        for opt in self.optimizers:
            if opt.step_was_skipped:
                return
        if self.split_batches:
            # Split batches -> the training dataloader batch size is not changed so one step per training step
            self.scheduler.step(*args, **kwargs)
        else:
            # Otherwise the training dataloader batch size was multiplied by `num_processes`, so we need to do
            # num_processes steps per training step
            num_processes = AcceleratorState().num_processes
            for _ in range(num_processes):
                # Special case when using OneCycle and `drop_last` was not used
                if hasattr(self.scheduler, "total_steps"):
                    if self.scheduler._step_count <= self.scheduler.total_steps:
                        self.scheduler.step(*args, **kwargs)
                else:
                    self.scheduler.step(*args, **kwargs)

    # Passthroughs to the wrapped scheduler
    def get_last_lr(self):
        return self.scheduler.get_last_lr()

    def state_dict(self):
        return self.scheduler.state_dict()

    def load_state_dict(self, state_dict):
        self.scheduler.load_state_dict(state_dict)

    def get_lr(self):
        return self.scheduler.get_lr()

    def print_lr(self, *args, **kwargs):
        return self.scheduler.print_lr(*args, **kwargs)
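
# Hedged usage sketch (optimizer/scheduler choices are illustrative; inside `accelerate`
# this wrapping is normally done for you by `Accelerator.prepare`):
#
#   import torch
#   model = torch.nn.Linear(2, 2)
#   opt = torch.optim.SGD(model.parameters(), lr=0.1)
#   sched = torch.optim.lr_scheduler.StepLR(opt, step_size=1)
#   wrapped = AcceleratedScheduler(sched, opt)
#   wrapped.step()  # only advances the inner scheduler when the optimizer actually stepped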
| 355 |
import random
def _partition(data: list, pivot) -> tuple:
    """Three-way partition of data around pivot: (less, equal, greater)."""
    less, equal, greater = [], [], []
    for element in data:
        if element < pivot:
            less.append(element)
        elif element > pivot:
            greater.append(element)
        else:
            equal.append(element)
    return less, equal, greater


def quick_select(items: list, index: int):
    """Return the index-th smallest element of items, or None if index is out of range."""
    if index >= len(items) or index < 0:
        return None

    pivot = items[random.randint(0, len(items) - 1)]
    count = 0
    smaller, equal, larger = _partition(items, pivot)
    count = len(equal)
    m = len(smaller)

    # index is the pivot
    if m <= index < m + count:
        return pivot
    # must be in smaller
    elif m > index:
        return quick_select(smaller, index)
    # must be in larger
    else:
        return quick_select(larger, index - (m + count))
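
# Worked examples (0-indexed order statistics): for items = [2, 4, 5, 7, 899, 54, 32],
# quick_select(items, 0) == 2 (the minimum) and quick_select(items, 5) == 54 (6th smallest).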
| 221 | 0 |
'''simple docstring'''
import pickle
import numpy as np
from matplotlib import pyplot as plt
class CNN:
    def __init__(self, conv1_get, size_p1, bp_num1, bp_num2, bp_num3, rate_w=0.2, rate_t=0.2):
        """
        :param conv1_get: [size, number, step] of the convolution kernel
        :param size_p1: pooling size
        :param bp_num1: units number of the flatten layer
        :param bp_num2: units number of the hidden layer
        :param bp_num3: units number of the output layer
        :param rate_w: learning rate for weights
        :param rate_t: learning rate for thresholds
        """
        self.num_bp1 = bp_num1
        self.num_bp2 = bp_num2
        self.num_bp3 = bp_num3
        self.conv1 = conv1_get[:2]
        self.step_conv1 = conv1_get[2]
        self.size_pooling1 = size_p1
        self.rate_weight = rate_w
        self.rate_thre = rate_t
        self.w_conv1 = [
            np.mat(-1 * np.random.rand(self.conv1[0], self.conv1[0]) + 0.5)
            for i in range(self.conv1[1])
        ]
        self.wkj = np.mat(-1 * np.random.rand(self.num_bp3, self.num_bp2) + 0.5)
        self.vji = np.mat(-1 * np.random.rand(self.num_bp2, self.num_bp1) + 0.5)
        self.thre_conv1 = -2 * np.random.rand(self.conv1[1]) + 1
        self.thre_bp2 = -2 * np.random.rand(self.num_bp2) + 1
        self.thre_bp3 = -2 * np.random.rand(self.num_bp3) + 1
    def save_model(self, save_path):
        # save all model parameters with pickle
        model_dic = {
            '''num_bp1''': self.num_bp1,
            '''num_bp2''': self.num_bp2,
            '''num_bp3''': self.num_bp3,
            '''conv1''': self.conv1,
            '''step_conv1''': self.step_conv1,
            '''size_pooling1''': self.size_pooling1,
            '''rate_weight''': self.rate_weight,
            '''rate_thre''': self.rate_thre,
            '''w_conv1''': self.w_conv1,
            '''wkj''': self.wkj,
            '''vji''': self.vji,
            '''thre_conv1''': self.thre_conv1,
            '''thre_bp2''': self.thre_bp2,
            '''thre_bp3''': self.thre_bp3,
        }
        with open(save_path, '''wb''') as f:
            pickle.dump(model_dic, f)

        print(f"Model saved: {save_path}")

    @classmethod
    def read_model(cls, model_path):
        # read a saved model and rebuild the CNN instance
        with open(model_path, '''rb''') as f:
            model_dic = pickle.load(f)  # noqa: S301

        conv_get = model_dic.get('''conv1''')
        conv_get.append(model_dic.get('''step_conv1'''))
        size_p1 = model_dic.get('''size_pooling1''')
        bp1 = model_dic.get('''num_bp1''')
        bp2 = model_dic.get('''num_bp2''')
        bp3 = model_dic.get('''num_bp3''')
        r_w = model_dic.get('''rate_weight''')
        r_t = model_dic.get('''rate_thre''')
        # create model instance
        conv_ins = CNN(conv_get, size_p1, bp1, bp2, bp3, r_w, r_t)
        # modify model parameters
        conv_ins.w_conv1 = model_dic.get('''w_conv1''')
        conv_ins.wkj = model_dic.get('''wkj''')
        conv_ins.vji = model_dic.get('''vji''')
        conv_ins.thre_conv1 = model_dic.get('''thre_conv1''')
        conv_ins.thre_bp2 = model_dic.get('''thre_bp2''')
        conv_ins.thre_bp3 = model_dic.get('''thre_bp3''')
        return conv_ins
    def sig(self, x):
        # sigmoid activation
        return 1 / (1 + np.exp(-1 * x))

    def do_round(self, x):
        return round(x, 3)
    def convolute(self, data, convs, w_convs, thre_convs, conv_step):
        # convolution process
        size_conv = convs[0]
        num_conv = convs[1]
        size_data = np.shape(data)[0]
        # get the data slice of original image data, data_focus
        data_focus = []
        for i_focus in range(0, size_data - size_conv + 1, conv_step):
            for j_focus in range(0, size_data - size_conv + 1, conv_step):
                focus = data[
                    i_focus : i_focus + size_conv, j_focus : j_focus + size_conv
                ]
                data_focus.append(focus)
        # calculate the feature map of every single kernel, and saved as list of matrix
        data_featuremap = []
        size_feature_map = int((size_data - size_conv) / conv_step + 1)
        for i_map in range(num_conv):
            featuremap = []
            for i_focus in range(len(data_focus)):
                net_focus = (
                    np.sum(np.multiply(data_focus[i_focus], w_convs[i_map]))
                    - thre_convs[i_map]
                )
                featuremap.append(self.sig(net_focus))
            featuremap = np.asmatrix(featuremap).reshape(
                size_feature_map, size_feature_map)
            data_featuremap.append(featuremap)

        # expanding the data slice to one dimension
        focus1_list = []
        for each_focus in data_focus:
            focus1_list.extend(self._expand_mat(each_focus))
        focus_list = np.asarray(focus1_list)
        return focus_list, data_featuremap
    def pooling(self, featuremaps, size_pooling, pooling_type="average_pool"):
        # pooling process
        size_map = len(featuremaps[0])
        size_pooled = int(size_map / size_pooling)
        featuremap_pooled = []
        for i_map in range(len(featuremaps)):
            feature_map = featuremaps[i_map]
            map_pooled = []
            for i_focus in range(0, size_map, size_pooling):
                for j_focus in range(0, size_map, size_pooling):
                    focus = feature_map[
                        i_focus : i_focus + size_pooling,
                        j_focus : j_focus + size_pooling,
                    ]
                    if pooling_type == "average_pool":
                        # average pooling
                        map_pooled.append(np.average(focus))
                    elif pooling_type == "max_pooling":
                        # max pooling
                        map_pooled.append(np.max(focus))
            map_pooled = np.asmatrix(map_pooled).reshape(size_pooled, size_pooled)
            featuremap_pooled.append(map_pooled)
        return featuremap_pooled
    def _expand(self, data):
        # expanding three-dimensional data to a one-dimensional list
        data_expanded = []
        for i in range(len(data)):
            shapes = np.shape(data[i])
            data_listed = data[i].reshape(1, shapes[0] * shapes[1])
            data_listed = data_listed.getA().tolist()[0]
            data_expanded.extend(data_listed)
        data_expanded = np.asarray(data_expanded)
        return data_expanded

    def _expand_mat(self, data_mat):
        # expanding a matrix to a one-dimensional list
        data_mat = np.asarray(data_mat)
        shapes = np.shape(data_mat)
        data_expanded = data_mat.reshape(1, shapes[0] * shapes[1])
        return data_expanded
    def _calculate_gradient_from_pool(self, out_map, pd_pool, num_map, size_map, size_pooling):
        """
        calculate the gradient from the data slice of the pool layer
        pd_pool: list of matrix
        out_map: the shape of data slice (size_map * size_map)
        """
        pd_all = []
        i_pool = 0
        for i_map in range(num_map):
            pd_conv1 = np.ones((size_map, size_map))
            for i in range(0, size_map, size_pooling):
                for j in range(0, size_map, size_pooling):
                    pd_conv1[i : i + size_pooling, j : j + size_pooling] = pd_pool[
                        i_pool
                    ]
                    i_pool = i_pool + 1
            pd_conv2 = np.multiply(
                pd_conv1, np.multiply(out_map[i_map], (1 - out_map[i_map])))
            pd_all.append(pd_conv2)
        return pd_all
def snake_case__ ( self : Dict , lowerCAmelCase__ : Dict , lowerCAmelCase__ : Optional[int] , lowerCAmelCase__ : int , lowerCAmelCase__ : Tuple , lowerCAmelCase__ : str , lowerCAmelCase__ : List[Any]=bool ) -> Union[str, Any]:
'''simple docstring'''
print('''----------------------Start Training-------------------------''' )
print((''' - - Shape: Train_Data ''', np.shape(lowerCAmelCase__ )) )
print((''' - - Shape: Teach_Data ''', np.shape(lowerCAmelCase__ )) )
_UpperCamelCase = 0
_UpperCamelCase = []
_UpperCamelCase = 10000
while rp < n_repeat and mse >= error_accuracy:
_UpperCamelCase = 0
print(f"""-------------Learning Time {rp}--------------""" )
for p in range(len(lowerCAmelCase__ ) ):
# print('------------Learning Image: %d--------------'%p)
_UpperCamelCase = np.asmatrix(datas_train[p] )
_UpperCamelCase = np.asarray(datas_teach[p] )
_UpperCamelCase , _UpperCamelCase = self.convolute(
lowerCAmelCase__ , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , )
_UpperCamelCase = self.pooling(lowerCAmelCase__ , self.size_poolinga )
_UpperCamelCase = np.shape(lowerCAmelCase__ )
_UpperCamelCase = self._expand(lowerCAmelCase__ )
_UpperCamelCase = data_bp_input
_UpperCamelCase = np.dot(lowerCAmelCase__ , self.vji.T ) - self.thre_bpa
_UpperCamelCase = self.sig(lowerCAmelCase__ )
_UpperCamelCase = np.dot(lowerCAmelCase__ , self.wkj.T ) - self.thre_bpa
_UpperCamelCase = self.sig(lowerCAmelCase__ )
# --------------Model Leaning ------------------------
# calculate error and gradient---------------
_UpperCamelCase = np.multiply(
(data_teach - bp_outa) , np.multiply(lowerCAmelCase__ , (1 - bp_outa) ) )
_UpperCamelCase = np.multiply(
np.dot(lowerCAmelCase__ , self.wkj ) , np.multiply(lowerCAmelCase__ , (1 - bp_outa) ) )
_UpperCamelCase = np.dot(lowerCAmelCase__ , self.vji )
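                # average-pooling backprop: each input cell of a pooling window receives
                # 1 / (size_poolinga ** 2) of the gradient flowing into that pooled cell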
_UpperCamelCase = pd_i_all / (self.size_poolinga * self.size_poolinga)
_UpperCamelCase = pd_conva_pooled.T.getA().tolist()
_UpperCamelCase = self._calculate_gradient_from_pool(
lowerCAmelCase__ , lowerCAmelCase__ , shape_featuremapa[0] , shape_featuremapa[1] , self.size_poolinga , )
# weight and threshold learning process---------
# convolution layer
for k_conv in range(self.conva[1] ):
_UpperCamelCase = self._expand_mat(pd_conva_all[k_conv] )
_UpperCamelCase = self.rate_weight * np.dot(lowerCAmelCase__ , lowerCAmelCase__ )
_UpperCamelCase = self.w_conva[k_conv] + delta_w.reshape(
(self.conva[0], self.conva[0]) )
_UpperCamelCase = (
self.thre_conva[k_conv]
- np.sum(pd_conva_all[k_conv] ) * self.rate_thre
)
                # fully connected layers
_UpperCamelCase = self.wkj + pd_k_all.T * bp_outa * self.rate_weight
_UpperCamelCase = self.vji + pd_j_all.T * bp_outa * self.rate_weight
_UpperCamelCase = self.thre_bpa - pd_k_all * self.rate_thre
_UpperCamelCase = self.thre_bpa - pd_j_all * self.rate_thre
                # calculate the summed error of this single image
_UpperCamelCase = np.sum(abs(data_teach - bp_outa ) )
error_count += errors
# print(' ----Teach ',data_teach)
# print(' ----BP_output ',bp_out3)
_UpperCamelCase = rp + 1
_UpperCamelCase = error_count / patterns
all_mse.append(lowerCAmelCase__ )
def draw_error():
_UpperCamelCase = [error_accuracy for i in range(int(n_repeat * 1.2 ) )]
plt.plot(lowerCAmelCase__ , '''+-''' )
plt.plot(lowerCAmelCase__ , '''r--''' )
plt.xlabel('''Learning Times''' )
plt.ylabel('''All_mse''' )
plt.grid(lowerCAmelCase__ , alpha=0.5 )
plt.show()
        print('''------------------Training Complete---------------------''' )
print((''' - - Training epoch: ''', rp, f""" - - Mse: {mse:.6f}""") )
if draw_e:
draw_error()
return mse
def snake_case__ ( self : List[Any] , lowerCAmelCase__ : Union[str, Any] ) -> Tuple:
'''simple docstring'''
_UpperCamelCase = []
print('''-------------------Start Testing-------------------------''' )
print((''' - - Shape: Test_Data ''', np.shape(lowerCAmelCase__ )) )
for p in range(len(lowerCAmelCase__ ) ):
_UpperCamelCase = np.asmatrix(datas_test[p] )
_UpperCamelCase , _UpperCamelCase = self.convolute(
lowerCAmelCase__ , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , )
_UpperCamelCase = self.pooling(lowerCAmelCase__ , self.size_poolinga )
_UpperCamelCase = self._expand(lowerCAmelCase__ )
_UpperCamelCase = data_bp_input
_UpperCamelCase = bp_outa * self.vji.T - self.thre_bpa
_UpperCamelCase = self.sig(lowerCAmelCase__ )
_UpperCamelCase = bp_outa * self.wkj.T - self.thre_bpa
_UpperCamelCase = self.sig(lowerCAmelCase__ )
produce_out.extend(bp_outa.getA().tolist() )
_UpperCamelCase = [list(map(self.do_round , lowerCAmelCase__ ) ) for each in produce_out]
return np.asarray(lowerCAmelCase__ )
def snake_case__ ( self : Union[str, Any] , lowerCAmelCase__ : Any ) -> Optional[Any]:
'''simple docstring'''
_UpperCamelCase = np.asmatrix(lowerCAmelCase__ )
_UpperCamelCase , _UpperCamelCase = self.convolute(
lowerCAmelCase__ , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , )
_UpperCamelCase = self.pooling(lowerCAmelCase__ , self.size_poolinga )
return data_conveda, data_pooleda
if __name__ == "__main__":
pass
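# A minimal usage sketch (illustrative, not part of the original file; it assumes the
# upstream class name `CNN` and the upstream method names `trian` and `predict` from
# the TheAlgorithms implementation this snippet derives from):
#
#   cnn = CNN(conv1_get=[3, 2, 1], size_p1=2, bp_num1=98, bp_num2=20, bp_num3=3)
#   cnn.trian(patterns=3, datas_train=train_images, datas_teach=train_labels,
#             n_repeat=100, error_accuracy=0.1, draw_e=True)
#   predictions = cnn.predict(test_images)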
| 324 |
'''simple docstring'''
import argparse
import json
import logging
import os
import shutil
import sys
import tempfile
import unittest
from unittest import mock
import torch
from accelerate.utils import write_basic_config
from transformers.testing_utils import TestCasePlus, get_gpu_count, run_command, slow, torch_device
from transformers.utils import is_apex_available
logging.basicConfig(level=logging.DEBUG)
lowercase__ : Optional[Any] = logging.getLogger()
def a__ ( ) -> Union[str, Any]:
"""simple docstring"""
_UpperCamelCase = argparse.ArgumentParser()
parser.add_argument('''-f''' )
_UpperCamelCase = parser.parse_args()
return args.f
def a__ ( lowercase : Dict ) -> int:
"""simple docstring"""
_UpperCamelCase = {}
_UpperCamelCase = os.path.join(lowercase, '''all_results.json''' )
if os.path.exists(lowercase ):
with open(lowercase, '''r''' ) as f:
_UpperCamelCase = json.load(lowercase )
else:
raise ValueError(F"""can't find {path}""" )
return results
def a__ ( ) -> Optional[Any]:
"""simple docstring"""
_UpperCamelCase = torch.cuda.is_available() and torch_device == '''cuda'''
return is_using_cuda and is_apex_available()
lowercase__ : str = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class __lowerCAmelCase ( __magic_name__ ):
"""simple docstring"""
@classmethod
def snake_case__ ( cls : Optional[int] ) -> List[Any]:
'''simple docstring'''
_UpperCamelCase = tempfile.mkdtemp()
_UpperCamelCase = os.path.join(cls.tmpdir , '''default_config.yml''' )
write_basic_config(save_location=cls.configPath )
_UpperCamelCase = ['''accelerate''', '''launch''', '''--config_file''', cls.configPath]
@classmethod
def snake_case__ ( cls : Tuple ) -> int:
'''simple docstring'''
shutil.rmtree(cls.tmpdir )
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
def snake_case__ ( self : Any ) -> Dict:
'''simple docstring'''
_UpperCamelCase = self.get_auto_remove_tmp_dir()
_UpperCamelCase = f"""
{self.examples_dir}/pytorch/text-classification/run_glue_no_trainer.py
--model_name_or_path distilbert-base-uncased
--output_dir {tmp_dir}
--train_file ./tests/fixtures/tests_samples/MRPC/train.csv
--validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--learning_rate=1e-4
--seed=42
--checkpointing_steps epoch
--with_tracking
""".split()
if is_cuda_and_apex_available():
testargs.append('''--fp16''' )
run_command(self._launch_args + testargs )
_UpperCamelCase = get_results(lowerCAmelCase__ )
self.assertGreaterEqual(result['''eval_accuracy'''] , 0.75 )
self.assertTrue(os.path.exists(os.path.join(lowerCAmelCase__ , '''epoch_0''' ) ) )
self.assertTrue(os.path.exists(os.path.join(lowerCAmelCase__ , '''glue_no_trainer''' ) ) )
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
def snake_case__ ( self : Union[str, Any] ) -> int:
'''simple docstring'''
_UpperCamelCase = self.get_auto_remove_tmp_dir()
_UpperCamelCase = f"""
{self.examples_dir}/pytorch/language-modeling/run_clm_no_trainer.py
--model_name_or_path distilgpt2
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--block_size 128
--per_device_train_batch_size 5
--per_device_eval_batch_size 5
--num_train_epochs 2
--output_dir {tmp_dir}
--checkpointing_steps epoch
--with_tracking
""".split()
if torch.cuda.device_count() > 1:
# Skipping because there are not enough batches to train the model + would need a drop_last to work.
return
run_command(self._launch_args + testargs )
_UpperCamelCase = get_results(lowerCAmelCase__ )
self.assertLess(result['''perplexity'''] , 100 )
self.assertTrue(os.path.exists(os.path.join(lowerCAmelCase__ , '''epoch_0''' ) ) )
self.assertTrue(os.path.exists(os.path.join(lowerCAmelCase__ , '''clm_no_trainer''' ) ) )
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
def snake_case__ ( self : Optional[int] ) -> Tuple:
'''simple docstring'''
_UpperCamelCase = self.get_auto_remove_tmp_dir()
_UpperCamelCase = f"""
{self.examples_dir}/pytorch/language-modeling/run_mlm_no_trainer.py
--model_name_or_path distilroberta-base
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--output_dir {tmp_dir}
--num_train_epochs=1
--checkpointing_steps epoch
--with_tracking
""".split()
run_command(self._launch_args + testargs )
_UpperCamelCase = get_results(lowerCAmelCase__ )
self.assertLess(result['''perplexity'''] , 42 )
self.assertTrue(os.path.exists(os.path.join(lowerCAmelCase__ , '''epoch_0''' ) ) )
self.assertTrue(os.path.exists(os.path.join(lowerCAmelCase__ , '''mlm_no_trainer''' ) ) )
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
def snake_case__ ( self : Union[str, Any] ) -> Any:
'''simple docstring'''
_UpperCamelCase = 7 if get_gpu_count() > 1 else 2
_UpperCamelCase = self.get_auto_remove_tmp_dir()
_UpperCamelCase = f"""
{self.examples_dir}/pytorch/token-classification/run_ner_no_trainer.py
--model_name_or_path bert-base-uncased
--train_file tests/fixtures/tests_samples/conll/sample.json
--validation_file tests/fixtures/tests_samples/conll/sample.json
--output_dir {tmp_dir}
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=2
--num_train_epochs={epochs}
--seed 7
--checkpointing_steps epoch
--with_tracking
""".split()
run_command(self._launch_args + testargs )
_UpperCamelCase = get_results(lowerCAmelCase__ )
self.assertGreaterEqual(result['''eval_accuracy'''] , 0.75 )
self.assertLess(result['''train_loss'''] , 0.5 )
self.assertTrue(os.path.exists(os.path.join(lowerCAmelCase__ , '''epoch_0''' ) ) )
self.assertTrue(os.path.exists(os.path.join(lowerCAmelCase__ , '''ner_no_trainer''' ) ) )
@unittest.skip(reason='''Fix me @muellerzr''' )
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
def snake_case__ ( self : int ) -> int:
'''simple docstring'''
_UpperCamelCase = self.get_auto_remove_tmp_dir()
_UpperCamelCase = f"""
{self.examples_dir}/pytorch/question-answering/run_qa_no_trainer.py
--model_name_or_path bert-base-uncased
--version_2_with_negative
--train_file tests/fixtures/tests_samples/SQUAD/sample.json
--validation_file tests/fixtures/tests_samples/SQUAD/sample.json
--output_dir {tmp_dir}
--seed=42
--max_train_steps=10
--num_warmup_steps=2
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--checkpointing_steps epoch
--with_tracking
""".split()
run_command(self._launch_args + testargs )
_UpperCamelCase = get_results(lowerCAmelCase__ )
# Because we use --version_2_with_negative the testing script uses SQuAD v2 metrics.
self.assertGreaterEqual(result['''eval_f1'''] , 28 )
self.assertGreaterEqual(result['''eval_exact'''] , 28 )
self.assertTrue(os.path.exists(os.path.join(lowerCAmelCase__ , '''epoch_0''' ) ) )
self.assertTrue(os.path.exists(os.path.join(lowerCAmelCase__ , '''qa_no_trainer''' ) ) )
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
def snake_case__ ( self : Union[str, Any] ) -> List[str]:
'''simple docstring'''
_UpperCamelCase = self.get_auto_remove_tmp_dir()
_UpperCamelCase = f"""
{self.examples_dir}/pytorch/multiple-choice/run_swag_no_trainer.py
--model_name_or_path bert-base-uncased
--train_file tests/fixtures/tests_samples/swag/sample.json
--validation_file tests/fixtures/tests_samples/swag/sample.json
--output_dir {tmp_dir}
--max_train_steps=20
--num_warmup_steps=2
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--with_tracking
""".split()
run_command(self._launch_args + testargs )
_UpperCamelCase = get_results(lowerCAmelCase__ )
self.assertGreaterEqual(result['''eval_accuracy'''] , 0.8 )
self.assertTrue(os.path.exists(os.path.join(lowerCAmelCase__ , '''swag_no_trainer''' ) ) )
@slow
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
def snake_case__ ( self : List[str] ) -> int:
'''simple docstring'''
_UpperCamelCase = self.get_auto_remove_tmp_dir()
_UpperCamelCase = f"""
{self.examples_dir}/pytorch/summarization/run_summarization_no_trainer.py
--model_name_or_path t5-small
--train_file tests/fixtures/tests_samples/xsum/sample.json
--validation_file tests/fixtures/tests_samples/xsum/sample.json
--output_dir {tmp_dir}
--max_train_steps=50
--num_warmup_steps=8
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--checkpointing_steps epoch
--with_tracking
""".split()
run_command(self._launch_args + testargs )
_UpperCamelCase = get_results(lowerCAmelCase__ )
self.assertGreaterEqual(result['''eval_rouge1'''] , 10 )
self.assertGreaterEqual(result['''eval_rouge2'''] , 2 )
self.assertGreaterEqual(result['''eval_rougeL'''] , 7 )
self.assertGreaterEqual(result['''eval_rougeLsum'''] , 7 )
self.assertTrue(os.path.exists(os.path.join(lowerCAmelCase__ , '''epoch_0''' ) ) )
self.assertTrue(os.path.exists(os.path.join(lowerCAmelCase__ , '''summarization_no_trainer''' ) ) )
@slow
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
def snake_case__ ( self : str ) -> Optional[int]:
'''simple docstring'''
_UpperCamelCase = self.get_auto_remove_tmp_dir()
_UpperCamelCase = f"""
{self.examples_dir}/pytorch/translation/run_translation_no_trainer.py
--model_name_or_path sshleifer/student_marian_en_ro_6_1
--source_lang en
--target_lang ro
--train_file tests/fixtures/tests_samples/wmt16/sample.json
--validation_file tests/fixtures/tests_samples/wmt16/sample.json
--output_dir {tmp_dir}
--max_train_steps=50
--num_warmup_steps=8
--num_beams=6
--learning_rate=3e-3
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--source_lang en_XX
--target_lang ro_RO
--checkpointing_steps epoch
--with_tracking
""".split()
run_command(self._launch_args + testargs )
_UpperCamelCase = get_results(lowerCAmelCase__ )
self.assertGreaterEqual(result['''eval_bleu'''] , 30 )
self.assertTrue(os.path.exists(os.path.join(lowerCAmelCase__ , '''epoch_0''' ) ) )
self.assertTrue(os.path.exists(os.path.join(lowerCAmelCase__ , '''translation_no_trainer''' ) ) )
@slow
def snake_case__ ( self : Any ) -> Union[str, Any]:
'''simple docstring'''
_UpperCamelCase = logging.StreamHandler(sys.stdout )
logger.addHandler(lowerCAmelCase__ )
_UpperCamelCase = self.get_auto_remove_tmp_dir()
_UpperCamelCase = f"""
{self.examples_dir}/pytorch/semantic-segmentation/run_semantic_segmentation_no_trainer.py
--dataset_name huggingface/semantic-segmentation-test-sample
--output_dir {tmp_dir}
--max_train_steps=10
--num_warmup_steps=2
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--checkpointing_steps epoch
""".split()
run_command(self._launch_args + testargs )
_UpperCamelCase = get_results(lowerCAmelCase__ )
self.assertGreaterEqual(result['''eval_overall_accuracy'''] , 0.10 )
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
def snake_case__ ( self : Union[str, Any] ) -> Union[str, Any]:
'''simple docstring'''
_UpperCamelCase = self.get_auto_remove_tmp_dir()
_UpperCamelCase = f"""
{self.examples_dir}/pytorch/image-classification/run_image_classification_no_trainer.py
--model_name_or_path google/vit-base-patch16-224-in21k
--dataset_name hf-internal-testing/cats_vs_dogs_sample
--learning_rate 1e-4
--per_device_train_batch_size 2
--per_device_eval_batch_size 1
--max_train_steps 2
--train_val_split 0.1
--seed 42
--output_dir {tmp_dir}
--with_tracking
--checkpointing_steps 1
""".split()
if is_cuda_and_apex_available():
testargs.append('''--fp16''' )
run_command(self._launch_args + testargs )
_UpperCamelCase = get_results(lowerCAmelCase__ )
        # The base model scores 25%
self.assertGreaterEqual(result['''eval_accuracy'''] , 0.6 )
self.assertTrue(os.path.exists(os.path.join(lowerCAmelCase__ , '''step_1''' ) ) )
self.assertTrue(os.path.exists(os.path.join(lowerCAmelCase__ , '''image_classification_no_trainer''' ) ) )
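# Note (illustrative): the @slow tests above are skipped by default; in the upstream
# `transformers` test suite they run only when the RUN_SLOW environment variable is
# set, e.g.:
#   RUN_SLOW=1 python -m pytest -sv <path-to-this-test-file>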
| 324 | 1 |
'''simple docstring'''
from __future__ import annotations
from numpy import array, cos, cross, floataa, radians, sin
from numpy.typing import NDArray
def UpperCamelCase( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = False ):
if radian_mode:
return [magnitude * cos(UpperCAmelCase_ ), magnitude * sin(UpperCAmelCase_ )]
return [magnitude * cos(radians(UpperCAmelCase_ ) ), magnitude * sin(radians(UpperCAmelCase_ ) )]
def UpperCamelCase( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = 10**-1 ):
UpperCAmelCase : NDArray[floataa] = cross(UpperCAmelCase_ , UpperCAmelCase_ )
UpperCAmelCase : float = sum(UpperCAmelCase_ )
return abs(UpperCAmelCase_ ) < eps
if __name__ == "__main__":
# Test to check if it works
lowercase__ = array(
[
polar_force(718.4, 180 - 30),
polar_force(879.54, 45),
polar_force(100, -90),
]
)
lowercase__ = array([[0, 0], [0, 0], [0, 0]])
assert in_static_equilibrium(forces, location)
# Problem 1 in image_data/2D_problems.jpg
lowercase__ = array(
[
polar_force(30 * 9.81, 15),
polar_force(215, 180 - 45),
polar_force(264, 90 - 30),
]
)
lowercase__ = array([[0, 0], [0, 0], [0, 0]])
assert in_static_equilibrium(forces, location)
# Problem in image_data/2D_problems_1.jpg
lowercase__ = array([[0, -2000], [0, -1200], [0, 15600], [0, -12400]])
lowercase__ = array([[0, 0], [6, 0], [10, 0], [12, 0]])
assert in_static_equilibrium(forces, location)
import doctest
doctest.testmod()
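    # One extra illustrative check (not from the original file): two equal and opposite
    # forces applied at the same point produce no net moment about the origin.
    forces = array([polar_force(10.0, 0), polar_force(10.0, 180)])
    location = array([[1, 0], [1, 0]])
    assert in_static_equilibrium(forces, location)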
| 280 |
'''simple docstring'''
def UpperCamelCase( UpperCAmelCase_ , UpperCAmelCase_ ):
UpperCAmelCase : int = len(UpperCAmelCase_ )
UpperCAmelCase : int = len(UpperCAmelCase_ )
UpperCAmelCase : int = (
first_str_length if first_str_length > second_str_length else second_str_length
)
UpperCAmelCase : list = []
for char_count in range(UpperCAmelCase_ ):
if char_count < first_str_length:
output_list.append(first_str[char_count] )
if char_count < second_str_length:
output_list.append(second_str[char_count] )
return "".join(UpperCAmelCase_ )
if __name__ == "__main__":
print(alternative_string_arrange("AB", "XYZ"), end=" ")
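    # Expected output for the call above: "AXBYZ". Characters are interleaved until the
    # shorter string is exhausted, then the rest of the longer string follows.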
| 280 | 1 |
import importlib
import json
import os
from collections import OrderedDict
from typing import Dict, Optional, Union
# Build the list of all image processors
from ...configuration_utils import PretrainedConfig
from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code
from ...image_processing_utils import ImageProcessingMixin
from ...utils import CONFIG_NAME, IMAGE_PROCESSOR_NAME, get_file_from_repo, logging
from .auto_factory import _LazyAutoMapping
from .configuration_auto import (
CONFIG_MAPPING_NAMES,
AutoConfig,
model_type_to_module_name,
replace_list_option_in_docstrings,
)
SCREAMING_SNAKE_CASE :Union[str, Any] = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE :List[Any] = OrderedDict(
[
('align', 'EfficientNetImageProcessor'),
('beit', 'BeitImageProcessor'),
('bit', 'BitImageProcessor'),
('blip', 'BlipImageProcessor'),
('blip-2', 'BlipImageProcessor'),
('bridgetower', 'BridgeTowerImageProcessor'),
('chinese_clip', 'ChineseCLIPImageProcessor'),
('clip', 'CLIPImageProcessor'),
('clipseg', 'ViTImageProcessor'),
('conditional_detr', 'ConditionalDetrImageProcessor'),
('convnext', 'ConvNextImageProcessor'),
('convnextv2', 'ConvNextImageProcessor'),
('cvt', 'ConvNextImageProcessor'),
('data2vec-vision', 'BeitImageProcessor'),
('deformable_detr', 'DeformableDetrImageProcessor'),
('deit', 'DeiTImageProcessor'),
('deta', 'DetaImageProcessor'),
('detr', 'DetrImageProcessor'),
('dinat', 'ViTImageProcessor'),
('donut-swin', 'DonutImageProcessor'),
('dpt', 'DPTImageProcessor'),
('efficientformer', 'EfficientFormerImageProcessor'),
('efficientnet', 'EfficientNetImageProcessor'),
('flava', 'FlavaImageProcessor'),
('focalnet', 'BitImageProcessor'),
('git', 'CLIPImageProcessor'),
('glpn', 'GLPNImageProcessor'),
('groupvit', 'CLIPImageProcessor'),
('imagegpt', 'ImageGPTImageProcessor'),
('instructblip', 'BlipImageProcessor'),
('layoutlmv2', 'LayoutLMv2ImageProcessor'),
('layoutlmv3', 'LayoutLMv3ImageProcessor'),
('levit', 'LevitImageProcessor'),
('mask2former', 'Mask2FormerImageProcessor'),
('maskformer', 'MaskFormerImageProcessor'),
('mgp-str', 'ViTImageProcessor'),
('mobilenet_v1', 'MobileNetV1ImageProcessor'),
('mobilenet_v2', 'MobileNetV2ImageProcessor'),
('mobilevit', 'MobileViTImageProcessor'),
('mobilevit', 'MobileViTImageProcessor'),
('mobilevitv2', 'MobileViTImageProcessor'),
('nat', 'ViTImageProcessor'),
('oneformer', 'OneFormerImageProcessor'),
('owlvit', 'OwlViTImageProcessor'),
('perceiver', 'PerceiverImageProcessor'),
('pix2struct', 'Pix2StructImageProcessor'),
('poolformer', 'PoolFormerImageProcessor'),
('regnet', 'ConvNextImageProcessor'),
('resnet', 'ConvNextImageProcessor'),
('sam', 'SamImageProcessor'),
('segformer', 'SegformerImageProcessor'),
('swiftformer', 'ViTImageProcessor'),
('swin', 'ViTImageProcessor'),
('swin2sr', 'Swin2SRImageProcessor'),
('swinv2', 'ViTImageProcessor'),
('table-transformer', 'DetrImageProcessor'),
('timesformer', 'VideoMAEImageProcessor'),
('tvlt', 'TvltImageProcessor'),
('upernet', 'SegformerImageProcessor'),
('van', 'ConvNextImageProcessor'),
('videomae', 'VideoMAEImageProcessor'),
('vilt', 'ViltImageProcessor'),
('vit', 'ViTImageProcessor'),
('vit_hybrid', 'ViTHybridImageProcessor'),
('vit_mae', 'ViTImageProcessor'),
('vit_msn', 'ViTImageProcessor'),
('xclip', 'CLIPImageProcessor'),
('yolos', 'YolosImageProcessor'),
]
)
SCREAMING_SNAKE_CASE :Optional[int] = _LazyAutoMapping(CONFIG_MAPPING_NAMES, IMAGE_PROCESSOR_MAPPING_NAMES)
def UpperCAmelCase ( a_ ) -> Tuple:
"""simple docstring"""
for module_name, extractors in IMAGE_PROCESSOR_MAPPING_NAMES.items():
if class_name in extractors:
__A = model_type_to_module_name(a_ )
__A = importlib.import_module(F'''.{module_name}''' , "transformers.models" )
try:
return getattr(a_ , a_ )
except AttributeError:
continue
for _, extractor in IMAGE_PROCESSOR_MAPPING._extra_content.items():
if getattr(a_ , "__name__" , a_ ) == class_name:
return extractor
    # We did not find the class, but maybe it's because a dep is missing. In that case, the class will be in the main
# init and we return the proper dummy to get an appropriate error message.
__A = importlib.import_module("transformers" )
if hasattr(a_ , a_ ):
return getattr(a_ , a_ )
return None
def UpperCAmelCase ( a_ , a_ = None , a_ = False , a_ = False , a_ = None , a_ = None , a_ = None , a_ = False , **a_ , ) -> List[str]:
"""simple docstring"""
__A = get_file_from_repo(
a_ , a_ , cache_dir=a_ , force_download=a_ , resume_download=a_ , proxies=a_ , use_auth_token=a_ , revision=a_ , local_files_only=a_ , )
if resolved_config_file is None:
logger.info(
"Could not locate the image processor configuration file, will try to use the model config instead." )
return {}
with open(a_ , encoding="utf-8" ) as reader:
return json.load(a_ )
class UpperCAmelCase :
'''simple docstring'''
def __init__( self : Optional[int] ):
raise EnvironmentError(
"AutoImageProcessor is designed to be instantiated "
"using the `AutoImageProcessor.from_pretrained(pretrained_model_name_or_path)` method." )
@classmethod
@replace_list_option_in_docstrings(A )
def UpperCamelCase_ ( cls : Union[str, Any] ,A : Tuple ,**A : Optional[int] ):
__A = kwargs.pop("config" ,A )
__A = kwargs.pop("trust_remote_code" ,A )
__A = True
__A , __A = ImageProcessingMixin.get_image_processor_dict(A ,**A )
__A = config_dict.get("image_processor_type" ,A )
__A = None
if "AutoImageProcessor" in config_dict.get("auto_map" ,{} ):
__A = config_dict["auto_map"]["AutoImageProcessor"]
# If we still don't have the image processor class, check if we're loading from a previous feature extractor config
# and if so, infer the image processor class from there.
if image_processor_class is None and image_processor_auto_map is None:
__A = config_dict.pop("feature_extractor_type" ,A )
if feature_extractor_class is not None:
logger.warning(
"Could not find image processor class in the image processor config or the model config. Loading"
" based on pattern matching with the model's feature extractor configuration." )
__A = feature_extractor_class.replace("FeatureExtractor" ,"ImageProcessor" )
if "AutoFeatureExtractor" in config_dict.get("auto_map" ,{} ):
__A = config_dict["auto_map"]["AutoFeatureExtractor"]
__A = feature_extractor_auto_map.replace("FeatureExtractor" ,"ImageProcessor" )
logger.warning(
"Could not find image processor auto map in the image processor config or the model config."
" Loading based on pattern matching with the model's feature extractor configuration." )
# If we don't find the image processor class in the image processor config, let's try the model config.
if image_processor_class is None and image_processor_auto_map is None:
if not isinstance(A ,A ):
__A = AutoConfig.from_pretrained(A ,**A )
# It could be in `config.image_processor_type``
__A = getattr(A ,"image_processor_type" ,A )
if hasattr(A ,"auto_map" ) and "AutoImageProcessor" in config.auto_map:
__A = config.auto_map["AutoImageProcessor"]
if image_processor_class is not None:
__A = image_processor_class_from_name(A )
__A = image_processor_auto_map is not None
__A = image_processor_class is not None or type(A ) in IMAGE_PROCESSOR_MAPPING
__A = resolve_trust_remote_code(
A ,A ,A ,A )
if has_remote_code and trust_remote_code:
__A = get_class_from_dynamic_module(
A ,A ,**A )
__A = kwargs.pop("code_revision" ,A )
if os.path.isdir(A ):
image_processor_class.register_for_auto_class()
return image_processor_class.from_dict(A ,**A )
elif image_processor_class is not None:
return image_processor_class.from_dict(A ,**A )
# Last try: we use the IMAGE_PROCESSOR_MAPPING.
elif type(A ) in IMAGE_PROCESSOR_MAPPING:
__A = IMAGE_PROCESSOR_MAPPING[type(A )]
return image_processor_class.from_dict(A ,**A )
raise ValueError(
f'''Unrecognized image processor in {pretrained_model_name_or_path}. Should have a '''
f'''`image_processor_type` key in its {IMAGE_PROCESSOR_NAME} of {CONFIG_NAME}, or one of the following '''
f'''`model_type` keys in its {CONFIG_NAME}: {', '.join(c for c in IMAGE_PROCESSOR_MAPPING_NAMES.keys() )}''' )
@staticmethod
def UpperCamelCase_ ( A : Optional[Any] ,A : Any ):
IMAGE_PROCESSOR_MAPPING.register(A ,A )
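# A minimal usage sketch (illustrative; upstream this class is exported as
# `AutoImageProcessor`):
#
#   from transformers import AutoImageProcessor
#   image_processor = AutoImageProcessor.from_pretrained("google/vit-base-patch16-224")
#   inputs = image_processor(images=image, return_tensors="pt")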
| 15 |
import argparse
import os
import shutil
import torch
from emmental.modules import MagnitudeBinarizer, ThresholdBinarizer, TopKBinarizer
def a_ ( __lowercase : Dict ) -> List[Any]:
_snake_case = args.pruning_method
_snake_case = args.threshold
_snake_case = args.model_name_or_path.rstrip('/' )
_snake_case = args.target_model_path
print(f'''Load fine-pruned model from {model_name_or_path}''' )
_snake_case = torch.load(os.path.join(__lowercase , 'pytorch_model.bin' ) )
_snake_case = {}
for name, tensor in model.items():
if "embeddings" in name or "LayerNorm" in name or "pooler" in name:
_snake_case = tensor
print(f'''Copied layer {name}''' )
elif "classifier" in name or "qa_output" in name:
_snake_case = tensor
print(f'''Copied layer {name}''' )
elif "bias" in name:
_snake_case = tensor
print(f'''Copied layer {name}''' )
else:
if pruning_method == "magnitude":
_snake_case = MagnitudeBinarizer.apply(inputs=__lowercase , threshold=__lowercase )
_snake_case = tensor * mask
print(f'''Pruned layer {name}''' )
elif pruning_method == "topK":
if "mask_scores" in name:
continue
_snake_case = name[:-6]
_snake_case = model[f'''{prefix_}mask_scores''']
_snake_case = TopKBinarizer.apply(__lowercase , __lowercase )
_snake_case = tensor * mask
print(f'''Pruned layer {name}''' )
elif pruning_method == "sigmoied_threshold":
if "mask_scores" in name:
continue
_snake_case = name[:-6]
_snake_case = model[f'''{prefix_}mask_scores''']
_snake_case = ThresholdBinarizer.apply(__lowercase , __lowercase , __lowercase )
_snake_case = tensor * mask
print(f'''Pruned layer {name}''' )
elif pruning_method == "l0":
if "mask_scores" in name:
continue
_snake_case = name[:-6]
_snake_case = model[f'''{prefix_}mask_scores''']
_snake_case , _snake_case = -0.1, 1.1
_snake_case = torch.sigmoid(__lowercase )
_snake_case = s * (r - l) + l
_snake_case = s_bar.clamp(min=0.0 , max=1.0 )
_snake_case = tensor * mask
print(f'''Pruned layer {name}''' )
else:
raise ValueError('Unknown pruning method' )
if target_model_path is None:
_snake_case = os.path.join(
os.path.dirname(__lowercase ) , f'''bertarized_{os.path.basename(__lowercase )}''' )
if not os.path.isdir(__lowercase ):
shutil.copytree(__lowercase , __lowercase )
print(f'''\nCreated folder {target_model_path}''' )
torch.save(__lowercase , os.path.join(__lowercase , 'pytorch_model.bin' ) )
print('\nPruned model saved! See you later!' )
if __name__ == "__main__":
_lowerCamelCase : Dict = argparse.ArgumentParser()
parser.add_argument(
'''--pruning_method''',
choices=['''l0''', '''magnitude''', '''topK''', '''sigmoied_threshold'''],
type=str,
required=True,
help=(
'''Pruning Method (l0 = L0 regularization, magnitude = Magnitude pruning, topK = Movement pruning,'''
''' sigmoied_threshold = Soft movement pruning)'''
),
)
parser.add_argument(
'''--threshold''',
type=float,
required=False,
help=(
'''For `magnitude` and `topK`, it is the level of remaining weights (in %) in the fine-pruned model.'''
'''For `sigmoied_threshold`, it is the threshold \tau against which the (sigmoied) scores are compared.'''
'''Not needed for `l0`'''
),
)
parser.add_argument(
'''--model_name_or_path''',
type=str,
required=True,
help='''Folder containing the model that was previously fine-pruned''',
)
parser.add_argument(
'''--target_model_path''',
default=None,
type=str,
required=False,
help='''Folder containing the model that was previously fine-pruned''',
)
_lowerCamelCase : int = parser.parse_args()
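    # Example invocation (illustrative; the script and model directory names are
    # hypothetical):
    #   python bertarize.py --pruning_method topK --threshold 0.10 \
    #       --model_name_or_path serialization_dir/fine_pruned_model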
    main(args)
| 282 | 0 |
"""simple docstring"""
import unittest
from transformers import BigBirdConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax
from transformers.models.big_bird.modeling_flax_big_bird import (
FlaxBigBirdForCausalLM,
FlaxBigBirdForMaskedLM,
FlaxBigBirdForMultipleChoice,
FlaxBigBirdForPreTraining,
FlaxBigBirdForQuestionAnswering,
FlaxBigBirdForSequenceClassification,
FlaxBigBirdForTokenClassification,
FlaxBigBirdModel,
)
class lowerCAmelCase__ ( unittest.TestCase ):
def __init__( self : List[str] , snake_case__ : Optional[Any] , snake_case__ : Any=2 , snake_case__ : Optional[Any]=5_6 , snake_case__ : Tuple=True , snake_case__ : List[str]=True , snake_case__ : str=True , snake_case__ : List[str]=True , snake_case__ : Optional[Any]=9_9 , snake_case__ : Optional[int]=3_2 , snake_case__ : List[str]=2 , snake_case__ : List[str]=2 , snake_case__ : Union[str, Any]=7 , snake_case__ : Optional[int]="gelu_new" , snake_case__ : Optional[Any]=0.1 , snake_case__ : int=0.1 , snake_case__ : int=5_1_2 , snake_case__ : Union[str, Any]=1_6 , snake_case__ : Optional[Any]=2 , snake_case__ : List[Any]=0.02 , snake_case__ : Optional[Any]=4 , snake_case__ : str="block_sparse" , snake_case__ : Union[str, Any]=True , snake_case__ : Union[str, Any]=False , snake_case__ : Union[str, Any]=2 , snake_case__ : Dict=3 , ):
'''simple docstring'''
UpperCAmelCase__ : Dict = parent
UpperCAmelCase__ : Optional[int] = batch_size
UpperCAmelCase__ : List[str] = seq_length
UpperCAmelCase__ : Optional[Any] = is_training
UpperCAmelCase__ : List[str] = use_attention_mask
UpperCAmelCase__ : List[Any] = use_token_type_ids
UpperCAmelCase__ : Union[str, Any] = use_labels
UpperCAmelCase__ : Tuple = vocab_size
UpperCAmelCase__ : Dict = hidden_size
UpperCAmelCase__ : List[str] = num_hidden_layers
UpperCAmelCase__ : str = num_attention_heads
UpperCAmelCase__ : List[str] = intermediate_size
UpperCAmelCase__ : Optional[Any] = hidden_act
UpperCAmelCase__ : List[Any] = hidden_dropout_prob
UpperCAmelCase__ : Optional[Any] = attention_probs_dropout_prob
UpperCAmelCase__ : int = max_position_embeddings
UpperCAmelCase__ : Dict = type_vocab_size
UpperCAmelCase__ : Optional[Any] = type_sequence_label_size
UpperCAmelCase__ : Optional[Any] = initializer_range
UpperCAmelCase__ : int = num_choices
UpperCAmelCase__ : List[Any] = rescale_embeddings
UpperCAmelCase__ : Tuple = attention_type
UpperCAmelCase__ : Union[str, Any] = use_bias
UpperCAmelCase__ : Optional[int] = block_size
UpperCAmelCase__ : List[Any] = num_random_blocks
def __a ( self : int ):
'''simple docstring'''
UpperCAmelCase__ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCAmelCase__ : str = None
if self.use_attention_mask:
UpperCAmelCase__ : Optional[int] = random_attention_mask([self.batch_size, self.seq_length] )
UpperCAmelCase__ : List[str] = None
if self.use_token_type_ids:
UpperCAmelCase__ : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
UpperCAmelCase__ : Any = BigBirdConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=snake_case__ , initializer_range=self.initializer_range , attention_type=self.attention_type , block_size=self.block_size , num_random_blocks=self.num_random_blocks , use_bias=self.use_bias , rescale_embeddings=self.rescale_embeddings , )
return config, input_ids, token_type_ids, attention_mask
def __a ( self : int ):
'''simple docstring'''
UpperCAmelCase__ : List[Any] = self.prepare_config_and_inputs()
UpperCAmelCase__ : List[str] = config_and_inputs
UpperCAmelCase__ : int = {
"input_ids": input_ids,
"token_type_ids": token_type_ids,
"attention_mask": attention_mask,
}
return config, inputs_dict
@require_flax
class lowerCAmelCase__ ( __magic_name__ , unittest.TestCase ):
SCREAMING_SNAKE_CASE_ =(
(
FlaxBigBirdForCausalLM,
FlaxBigBirdModel,
FlaxBigBirdForPreTraining,
FlaxBigBirdForMaskedLM,
FlaxBigBirdForMultipleChoice,
FlaxBigBirdForQuestionAnswering,
FlaxBigBirdForSequenceClassification,
FlaxBigBirdForTokenClassification,
)
if is_flax_available()
else ()
)
SCREAMING_SNAKE_CASE_ =False
SCREAMING_SNAKE_CASE_ =False
def __a ( self : int ):
'''simple docstring'''
UpperCAmelCase__ : Optional[int] = FlaxBigBirdModelTester(self )
@slow
# copied from `test_modeling_flax_common` because it takes much longer than other models
def __a ( self : Union[str, Any] ):
'''simple docstring'''
super().test_from_pretrained_save_pretrained()
@slow
# copied from `test_modeling_flax_common` because it takes much longer than other models
def __a ( self : List[Any] ):
'''simple docstring'''
super().test_from_pretrained_with_no_automatic_init()
@slow
# copied from `test_modeling_flax_common` because it takes much longer than other models
def __a ( self : Tuple ):
'''simple docstring'''
super().test_no_automatic_init()
@slow
# copied from `test_modeling_flax_common` because it takes much longer than other models
def __a ( self : int ):
'''simple docstring'''
super().test_hidden_states_output()
@slow
def __a ( self : List[Any] ):
'''simple docstring'''
for model_class_name in self.all_model_classes:
UpperCAmelCase__ : Union[str, Any] = model_class_name.from_pretrained("google/bigbird-roberta-base" )
self.assertIsNotNone(snake_case__ )
def __a ( self : Optional[int] ):
'''simple docstring'''
if self.test_attn_probs:
super().test_attention_outputs()
@slow
# copied from `test_modeling_flax_common` because it takes much longer than other models
def __a ( self : str ):
'''simple docstring'''
UpperCAmelCase__ : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
UpperCAmelCase__ : List[str] = self._prepare_for_class(snake_case__ , snake_case__ )
UpperCAmelCase__ : str = model_class(snake_case__ )
@jax.jit
def model_jitted(snake_case__ : str , snake_case__ : List[str]=None , **snake_case__ : Optional[int] ):
return model(input_ids=snake_case__ , attention_mask=snake_case__ , **snake_case__ )
with self.subTest("JIT Enabled" ):
UpperCAmelCase__ : List[str] = model_jitted(**snake_case__ ).to_tuple()
with self.subTest("JIT Disabled" ):
with jax.disable_jit():
UpperCAmelCase__ : List[str] = model_jitted(**snake_case__ ).to_tuple()
self.assertEqual(len(snake_case__ ) , len(snake_case__ ) )
for jitted_output, output in zip(snake_case__ , snake_case__ ):
self.assertEqual(jitted_output.shape , output.shape )
def __a ( self : Tuple , snake_case__ : int , snake_case__ : Optional[Any] , snake_case__ : Optional[int] , snake_case__ : Optional[Any]=1e-5 , snake_case__ : str="outputs" , snake_case__ : Any=None ):
'''simple docstring'''
        # `bigbird_block_sparse_attention` in `FlaxBigBird` returns `attention_probs = None`, while in the PyTorch version
        # an effort was made to return `attention_probs` (yet to be verified).
if name.startswith("outputs.attentions" ):
return
else:
super().check_pt_flax_outputs(snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ )
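# Note (illustrative): the checkpoint exercised by these tests can be loaded directly
# upstream, e.g.:
#
#   from transformers import FlaxBigBirdModel
#   model = FlaxBigBirdModel.from_pretrained("google/bigbird-roberta-base")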
| 370 |
"""simple docstring"""
import qiskit
def SCREAMING_SNAKE_CASE__ ( snake_case : int , snake_case : int )-> qiskit.result.counts.Counts:
'''simple docstring'''
UpperCAmelCase__ : str = qiskit.Aer.get_backend("aer_simulator" )
UpperCAmelCase__ : Optional[int] = qiskit.QuantumCircuit(4 , 2 )
# encode inputs in qubits 0 and 1
if bita == 1:
qc_ha.x(0 )
if bita == 1:
qc_ha.x(1 )
qc_ha.barrier()
# use cnots to write XOR of the inputs on qubit2
qc_ha.cx(0 , 2 )
qc_ha.cx(1 , 2 )
# use ccx / toffoli gate to write AND of the inputs on qubit3
qc_ha.ccx(0 , 1 , 3 )
qc_ha.barrier()
# extract outputs
qc_ha.measure(2 , 0 ) # extract XOR value
qc_ha.measure(3 , 1 ) # extract AND value
# Execute the circuit on the qasm simulator
UpperCAmelCase__ : Optional[int] = qiskit.execute(snake_case , snake_case , shots=1000 )
# Return the histogram data of the results of the experiment
return job.result().get_counts(snake_case )
if __name__ == "__main__":
_lowerCAmelCase : Optional[Any] = half_adder(1, 1)
print(F"""Half Adder Output Qubit Counts: {counts}""")
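    # Half-adder truth table for reference; each count key reads carry then sum
    # (classical bit 1 = AND, classical bit 0 = XOR):
    #   half_adder(0, 0) -> {"00": 1000}
    #   half_adder(0, 1) -> {"01": 1000}
    #   half_adder(1, 0) -> {"01": 1000}
    #   half_adder(1, 1) -> {"10": 1000}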
| 298 | 0 |
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A_ : Dict = logging.get_logger(__name__)
A_ : Tuple = {
'asapp/sew-d-tiny-100k': 'https://huggingface.co/asapp/sew-d-tiny-100k/resolve/main/config.json',
# See all SEW-D models at https://huggingface.co/models?filter=sew-d
}
class _a (__magic_name__ ):
'''simple docstring'''
UpperCAmelCase__: List[Any] = '''sew-d'''
def __init__( self , A__=32 , A__=768 , A__=12 , A__=12 , A__=3072 , A__=2 , A__=512 , A__=256 , A__=True , A__=True , A__=("p2c", "c2p") , A__="layer_norm" , A__="gelu_python" , A__=0.1 , A__=0.1 , A__=0.1 , A__=0.0 , A__=0.1 , A__=0.0_2 , A__=1e-7 , A__=1e-5 , A__="group" , A__="gelu" , A__=(64, 128, 128, 128, 128, 256, 256, 256, 256, 512, 512, 512, 512) , A__=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1) , A__=(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1) , A__=False , A__=128 , A__=16 , A__=True , A__=0.0_5 , A__=10 , A__=2 , A__=0.0 , A__=10 , A__=0 , A__="mean" , A__=False , A__=False , A__=256 , A__=0 , A__=1 , A__=2 , **A__ , ):
super().__init__(**A__ , pad_token_id=A__ , bos_token_id=A__ , eos_token_id=A__ )
A__ : Optional[int] = hidden_size
A__ : Any = feat_extract_norm
A__ : Any = feat_extract_activation
A__ : Optional[int] = list(A__ )
A__ : List[str] = list(A__ )
A__ : Union[str, Any] = list(A__ )
A__ : Union[str, Any] = conv_bias
A__ : Tuple = num_conv_pos_embeddings
A__ : Optional[Any] = num_conv_pos_embedding_groups
A__ : List[Any] = len(self.conv_dim )
A__ : List[str] = num_hidden_layers
A__ : Any = intermediate_size
A__ : int = squeeze_factor
A__ : str = max_position_embeddings
A__ : Optional[Any] = position_buckets
A__ : Optional[int] = share_att_key
A__ : Any = relative_attention
A__ : int = norm_rel_ebd
A__ : int = list(A__ )
A__ : Optional[Any] = hidden_act
A__ : Optional[int] = num_attention_heads
A__ : Dict = hidden_dropout
A__ : Tuple = attention_dropout
A__ : List[Any] = activation_dropout
A__ : List[Any] = feat_proj_dropout
A__ : Optional[Any] = final_dropout
A__ : List[str] = layer_norm_eps
A__ : List[Any] = feature_layer_norm_eps
A__ : List[Any] = initializer_range
A__ : Dict = vocab_size
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
"""Configuration for convolutional layers is incorrect."""
"""It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`,"""
F"""but is `len(config.conv_dim) = {len(self.conv_dim )}`, `len(config.conv_stride)"""
F"""= {len(self.conv_stride )}`, `len(config.conv_kernel) = {len(self.conv_kernel )}`.""" )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
A__ : Dict = apply_spec_augment
A__ : Any = mask_time_prob
A__ : Optional[int] = mask_time_length
A__ : Tuple = mask_time_min_masks
A__ : int = mask_feature_prob
A__ : Tuple = mask_feature_length
A__ : Union[str, Any] = mask_feature_min_masks
# ctc loss
A__ : str = ctc_loss_reduction
A__ : Optional[int] = ctc_zero_infinity
# sequence classification
A__ : List[Any] = use_weighted_layer_sum
A__ : Dict = classifier_proj_size
@property
def __A ( self ):
return functools.reduce(operator.mul , self.conv_stride , 1 )
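        # With the default strides (5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1) this product
        # is 5 * 2**6 = 320, i.e. one feature frame per 320 raw audio samples.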
| 192 |
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch
import numpy as np
import pytest
from datasets.arrow_dataset import Dataset
from datasets.search import ElasticSearchIndex, FaissIndex, MissingIndex
from .utils import require_elasticsearch, require_faiss
A_ : Any = pytest.mark.integration
@require_faiss
class _a (__magic_name__ ):
'''simple docstring'''
def __A ( self ):
A__ : Tuple = Dataset.from_dict({"""filename""": ["""my_name-train""" + """_""" + str(A__ ) for x in np.arange(30 ).tolist()]} )
return dset
def __A ( self ):
import faiss
A__ : Dataset = self._create_dummy_dataset()
A__ : Union[str, Any] = dset.map(
lambda A__ , A__ : {"vecs": i * np.ones(5 , dtype=np.floataa )} , with_indices=A__ , keep_in_memory=A__ )
A__ : int = dset.add_faiss_index("""vecs""" , batch_size=100 , metric_type=faiss.METRIC_INNER_PRODUCT )
A__ , A__ : Optional[Any] = dset.get_nearest_examples("""vecs""" , np.ones(5 , dtype=np.floataa ) )
self.assertEqual(examples["""filename"""][0] , """my_name-train_29""" )
dset.drop_index("""vecs""" )
def __A ( self ):
import faiss
A__ : Dataset = self._create_dummy_dataset()
dset.add_faiss_index_from_external_arrays(
external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name="""vecs""" , batch_size=100 , metric_type=faiss.METRIC_INNER_PRODUCT , )
A__ , A__ : str = dset.get_nearest_examples("""vecs""" , np.ones(5 , dtype=np.floataa ) )
self.assertEqual(examples["""filename"""][0] , """my_name-train_29""" )
def __A ( self ):
import faiss
A__ : Dataset = self._create_dummy_dataset()
dset.add_faiss_index_from_external_arrays(
external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name="""vecs""" , metric_type=faiss.METRIC_INNER_PRODUCT , )
# Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
# ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
# see https://bugs.python.org/issue14243 and
# https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
with tempfile.NamedTemporaryFile(delete=A__ ) as tmp_file:
dset.save_faiss_index("""vecs""" , tmp_file.name )
dset.load_faiss_index("""vecs2""" , tmp_file.name )
os.unlink(tmp_file.name )
A__ , A__ : Tuple = dset.get_nearest_examples("""vecs2""" , np.ones(5 , dtype=np.floataa ) )
self.assertEqual(examples["""filename"""][0] , """my_name-train_29""" )
def __A ( self ):
A__ : Dataset = self._create_dummy_dataset()
dset.add_faiss_index_from_external_arrays(
external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name="""vecs""" )
dset.drop_index("""vecs""" )
self.assertRaises(A__ , partial(dset.get_nearest_examples , """vecs2""" , np.ones(5 , dtype=np.floataa ) ) )
def __A ( self ):
from elasticsearch import Elasticsearch
A__ : Dataset = self._create_dummy_dataset()
with patch("""elasticsearch.Elasticsearch.search""" ) as mocked_search, patch(
"""elasticsearch.client.IndicesClient.create""" ) as mocked_index_create, patch("""elasticsearch.helpers.streaming_bulk""" ) as mocked_bulk:
A__ : List[Any] = {"""acknowledged""": True}
mocked_bulk.return_value([(True, None)] * 30 )
A__ : List[Any] = {"""hits""": {"""hits""": [{"""_score""": 1, """_id""": 29}]}}
A__ : Any = Elasticsearch()
dset.add_elasticsearch_index("""filename""" , es_client=A__ )
A__ , A__ : Any = dset.get_nearest_examples("""filename""" , """my_name-train_29""" )
self.assertEqual(examples["""filename"""][0] , """my_name-train_29""" )
@require_faiss
class _a (__magic_name__ ):
'''simple docstring'''
def __A ( self ):
import faiss
A__ : Optional[int] = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT )
# add vectors
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
self.assertIsNotNone(index.faiss_index )
self.assertEqual(index.faiss_index.ntotal , 5 )
index.add_vectors(np.zeros((5, 5) , dtype=np.floataa ) )
self.assertEqual(index.faiss_index.ntotal , 10 )
# single query
A__ : Any = np.zeros(5 , dtype=np.floataa )
A__ : str = 1
A__ , A__ : Optional[Any] = index.search(A__ )
self.assertRaises(A__ , index.search , query.reshape(-1 , 1 ) )
self.assertGreater(scores[0] , 0 )
self.assertEqual(indices[0] , 1 )
# batched queries
A__ : Optional[int] = np.eye(5 , dtype=np.floataa )[::-1]
A__ , A__ : str = index.search_batch(A__ )
self.assertRaises(A__ , index.search_batch , queries[0] )
A__ : str = [scores[0] for scores in total_scores]
A__ : Tuple = [indices[0] for indices in total_indices]
self.assertGreater(np.min(A__ ) , 0 )
self.assertListEqual([4, 3, 2, 1, 0] , A__ )
def __A ( self ):
import faiss
A__ : Dict = FaissIndex(string_factory="""Flat""" )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
self.assertIsInstance(index.faiss_index , faiss.IndexFlat )
A__ : Dict = FaissIndex(string_factory="""LSH""" )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
self.assertIsInstance(index.faiss_index , faiss.IndexLSH )
with self.assertRaises(A__ ):
A__ : List[Any] = FaissIndex(string_factory="""Flat""" , custom_index=faiss.IndexFlat(5 ) )
def __A ( self ):
import faiss
A__ : List[Any] = faiss.IndexFlat(5 )
A__ : Union[str, Any] = FaissIndex(custom_index=A__ )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
self.assertIsInstance(index.faiss_index , faiss.IndexFlat )
def __A ( self ):
import faiss
A__ : Any = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
# Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
# ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
# see https://bugs.python.org/issue14243 and
# https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
with tempfile.NamedTemporaryFile(delete=A__ ) as tmp_file:
index.save(tmp_file.name )
A__ : int = FaissIndex.load(tmp_file.name )
os.unlink(tmp_file.name )
A__ : Optional[Any] = np.zeros(5 , dtype=np.floataa )
A__ : Optional[int] = 1
A__ , A__ : List[Any] = index.search(A__ )
self.assertGreater(scores[0] , 0 )
self.assertEqual(indices[0] , 1 )
@require_faiss
def UpperCamelCase (lowercase_: Dict ) -> Optional[Any]:
import faiss
A__ : Dict = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
A__ : Optional[Any] = """index.faiss"""
A__ : Any = f"""mock://{index_name}"""
index.save(lowercase_ , storage_options=mockfs.storage_options )
A__ : str = FaissIndex.load(lowercase_ , storage_options=mockfs.storage_options )
A__ : int = np.zeros(5 , dtype=np.floataa )
A__ : Union[str, Any] = 1
A__ , A__ : Any = index.search(lowercase_ )
assert scores[0] > 0
assert indices[0] == 1
@require_elasticsearch
class _a (__magic_name__ ):
'''simple docstring'''
def __A ( self ):
from elasticsearch import Elasticsearch
with patch("""elasticsearch.Elasticsearch.search""" ) as mocked_search, patch(
"""elasticsearch.client.IndicesClient.create""" ) as mocked_index_create, patch("""elasticsearch.helpers.streaming_bulk""" ) as mocked_bulk:
A__ : List[str] = Elasticsearch()
A__ : List[Any] = {"""acknowledged""": True}
A__ : int = ElasticSearchIndex(es_client=A__ )
mocked_bulk.return_value([(True, None)] * 3 )
index.add_documents(["""foo""", """bar""", """foobar"""] )
# single query
A__ : Dict = """foo"""
A__ : Tuple = {"""hits""": {"""hits""": [{"""_score""": 1, """_id""": 0}]}}
A__ , A__ : Tuple = index.search(A__ )
self.assertEqual(scores[0] , 1 )
self.assertEqual(indices[0] , 0 )
# single query with timeout
A__ : List[str] = """foo"""
A__ : str = {"""hits""": {"""hits""": [{"""_score""": 1, """_id""": 0}]}}
A__ , A__ : Dict = index.search(A__ , request_timeout=30 )
self.assertEqual(scores[0] , 1 )
self.assertEqual(indices[0] , 0 )
# batched queries
A__ : Union[str, Any] = ["""foo""", """bar""", """foobar"""]
A__ : Tuple = {"""hits""": {"""hits""": [{"""_score""": 1, """_id""": 1}]}}
A__ , A__ : Dict = index.search_batch(A__ )
A__ : Tuple = [scores[0] for scores in total_scores]
A__ : Optional[int] = [indices[0] for indices in total_indices]
self.assertGreater(np.min(A__ ) , 0 )
self.assertListEqual([1, 1, 1] , A__ )
# batched queries with timeout
A__ : int = ["""foo""", """bar""", """foobar"""]
A__ : List[Any] = {"""hits""": {"""hits""": [{"""_score""": 1, """_id""": 1}]}}
A__ , A__ : Tuple = index.search_batch(A__ , request_timeout=30 )
A__ : List[Any] = [scores[0] for scores in total_scores]
A__ : Optional[int] = [indices[0] for indices in total_indices]
self.assertGreater(np.min(A__ ) , 0 )
self.assertListEqual([1, 1, 1] , A__ )
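# A minimal standalone FAISS round trip (illustrative; it mirrors the tests above
# with the real library):
#
#   import faiss
#   import numpy as np
#   index = faiss.IndexFlatIP(5)              # inner-product index over 5-d vectors
#   index.add(np.eye(5, dtype=np.float32))
#   scores, ids = index.search(np.eye(5, dtype=np.float32)[:1], 1)
#   assert ids[0][0] == 0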
| 192 | 1 |
from ...configuration_utils import PretrainedConfig
UpperCamelCase = {
'''sijunhe/nezha-cn-base''': '''https://huggingface.co/sijunhe/nezha-cn-base/resolve/main/config.json''',
}
class __UpperCAmelCase (_UpperCAmelCase ):
__snake_case : Any = NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP
__snake_case : List[str] = "nezha"
def __init__( self: Optional[int] , UpperCAmelCase_: Tuple=21_128 , UpperCAmelCase_: Union[str, Any]=768 , UpperCAmelCase_: int=12 , UpperCAmelCase_: List[str]=12 , UpperCAmelCase_: str=3_072 , UpperCAmelCase_: int="gelu" , UpperCAmelCase_: Tuple=0.1 , UpperCAmelCase_: Union[str, Any]=0.1 , UpperCAmelCase_: Dict=512 , UpperCAmelCase_: Optional[Any]=64 , UpperCAmelCase_: Optional[Any]=2 , UpperCAmelCase_: List[str]=0.02 , UpperCAmelCase_: List[Any]=1E-12 , UpperCAmelCase_: List[str]=0.1 , UpperCAmelCase_: Tuple=0 , UpperCAmelCase_: Union[str, Any]=2 , UpperCAmelCase_: str=3 , UpperCAmelCase_: str=True , **UpperCAmelCase_: Any , ):
'''simple docstring'''
super().__init__(pad_token_id=UpperCAmelCase_ , bos_token_id=UpperCAmelCase_ , eos_token_id=UpperCAmelCase_ , **UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = vocab_size
_SCREAMING_SNAKE_CASE = hidden_size
_SCREAMING_SNAKE_CASE = num_hidden_layers
_SCREAMING_SNAKE_CASE = num_attention_heads
_SCREAMING_SNAKE_CASE = hidden_act
_SCREAMING_SNAKE_CASE = intermediate_size
_SCREAMING_SNAKE_CASE = hidden_dropout_prob
_SCREAMING_SNAKE_CASE = attention_probs_dropout_prob
_SCREAMING_SNAKE_CASE = max_position_embeddings
_SCREAMING_SNAKE_CASE = max_relative_position
_SCREAMING_SNAKE_CASE = type_vocab_size
_SCREAMING_SNAKE_CASE = initializer_range
_SCREAMING_SNAKE_CASE = layer_norm_eps
_SCREAMING_SNAKE_CASE = classifier_dropout
_SCREAMING_SNAKE_CASE = use_cache
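# A minimal usage sketch (illustrative; upstream this class is exported as
# `NezhaConfig`):
#
#   config = NezhaConfig(hidden_size=384, num_hidden_layers=6)
#   config.save_pretrained("./nezha-small")     # writes config.json
#   reloaded = NezhaConfig.from_pretrained("./nezha-small")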
| 364 |
import copy
import re
class __UpperCAmelCase :
__snake_case : Any = "hp"
__snake_case : str = {}
__snake_case : List[Any] = None
@classmethod
def UpperCamelCase ( cls: Optional[Any] , UpperCAmelCase_: int , UpperCAmelCase_: Optional[int] ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = prefix
_SCREAMING_SNAKE_CASE = defaults
cls.build_naming_info()
@staticmethod
def UpperCamelCase ( UpperCAmelCase_: Any , UpperCAmelCase_: str ):
'''simple docstring'''
if len(UpperCAmelCase_ ) == 0:
return ""
_SCREAMING_SNAKE_CASE = None
if any(char.isdigit() for char in word ):
raise Exception(F'Parameters should not contain numbers: \'{word}\' contains a number' )
if word in info["short_word"]:
return info["short_word"][word]
for prefix_len in range(1 , len(UpperCAmelCase_ ) + 1 ):
_SCREAMING_SNAKE_CASE = word[:prefix_len]
if prefix in info["reverse_short_word"]:
continue
else:
_SCREAMING_SNAKE_CASE = prefix
break
if short_word is None:
# Paranoid fallback
def int_to_alphabetic(UpperCAmelCase_: List[Any] ):
_SCREAMING_SNAKE_CASE = """"""
while integer != 0:
_SCREAMING_SNAKE_CASE = chr(ord("""A""" ) + integer % 10 ) + s
integer //= 10
return s
_SCREAMING_SNAKE_CASE = 0
while True:
_SCREAMING_SNAKE_CASE = word + """#""" + int_to_alphabetic(UpperCAmelCase_ )
if sword in info["reverse_short_word"]:
continue
else:
_SCREAMING_SNAKE_CASE = sword
break
_SCREAMING_SNAKE_CASE = short_word
_SCREAMING_SNAKE_CASE = word
return short_word
@staticmethod
def UpperCamelCase ( UpperCAmelCase_: List[str] , UpperCAmelCase_: List[str] ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = param_name.split("""_""" )
_SCREAMING_SNAKE_CASE = [TrialShortNamer.shortname_for_word(UpperCAmelCase_ , UpperCAmelCase_ ) for word in words]
# We try to create a separatorless short name, but if there is a collision we have to fallback
# to a separated short name
_SCREAMING_SNAKE_CASE = ["""""", """_"""]
for separator in separators:
_SCREAMING_SNAKE_CASE = separator.join(UpperCAmelCase_ )
if shortname not in info["reverse_short_param"]:
_SCREAMING_SNAKE_CASE = shortname
_SCREAMING_SNAKE_CASE = param_name
return shortname
return param_name
@staticmethod
def UpperCamelCase ( UpperCAmelCase_: Optional[int] , UpperCAmelCase_: Optional[int] ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = TrialShortNamer.shortname_for_key(UpperCAmelCase_ , UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = short_name
_SCREAMING_SNAKE_CASE = param_name
@classmethod
def UpperCamelCase ( cls: str ):
'''simple docstring'''
if cls.NAMING_INFO is not None:
return
_SCREAMING_SNAKE_CASE = {
"""short_word""": {},
"""reverse_short_word""": {},
"""short_param""": {},
"""reverse_short_param""": {},
}
_SCREAMING_SNAKE_CASE = list(cls.DEFAULTS.keys() )
for k in field_keys:
cls.add_new_param_name(UpperCAmelCase_ , UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = info
@classmethod
def UpperCamelCase ( cls: Any , UpperCAmelCase_: Optional[Any] ):
'''simple docstring'''
cls.build_naming_info()
assert cls.PREFIX is not None
_SCREAMING_SNAKE_CASE = [copy.copy(cls.PREFIX )]
for k, v in params.items():
if k not in cls.DEFAULTS:
raise Exception(F'You should provide a default value for the param name {k} with value {v}' )
if v == cls.DEFAULTS[k]:
# The default value is not added to the name
continue
_SCREAMING_SNAKE_CASE = cls.NAMING_INFO["""short_param"""][k]
if isinstance(UpperCAmelCase_ , UpperCAmelCase_ ):
_SCREAMING_SNAKE_CASE = 1 if v else 0
_SCREAMING_SNAKE_CASE = """""" if isinstance(UpperCAmelCase_ , (int, float) ) else """-"""
_SCREAMING_SNAKE_CASE = F'{key}{sep}{v}'
name.append(UpperCAmelCase_ )
return "_".join(UpperCAmelCase_ )
@classmethod
def UpperCamelCase ( cls: int , UpperCAmelCase_: int ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = repr[len(cls.PREFIX ) + 1 :]
if repr == "":
_SCREAMING_SNAKE_CASE = []
else:
_SCREAMING_SNAKE_CASE = repr.split("""_""" )
_SCREAMING_SNAKE_CASE = {}
for value in values:
if "-" in value:
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = value.split("""-""" )
else:
_SCREAMING_SNAKE_CASE = re.sub("""[0-9.]""" , """""" , UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = float(re.sub("""[^0-9.]""" , """""" , UpperCAmelCase_ ) )
_SCREAMING_SNAKE_CASE = cls.NAMING_INFO["""reverse_short_param"""][p_k]
_SCREAMING_SNAKE_CASE = p_v
for k in cls.DEFAULTS:
if k not in parameters:
_SCREAMING_SNAKE_CASE = cls.DEFAULTS[k]
return parameters
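# A minimal usage sketch (illustrative; it assumes the upstream class name
# `TrialShortNamer` and the upstream method names `set_defaults`, `shortname` and
# `parse_repr`, which the obfuscated defs above stand in for):
#
#   class RunShortNamer(TrialShortNamer):
#       pass
#
#   RunShortNamer.set_defaults("run", {"learning_rate": 1e-3, "batch_size": 8})
#   name = RunShortNamer.shortname({"learning_rate": 1e-4, "batch_size": 8})
#   # -> "run_lr0.0001": only non-default values are encoded, and
#   # RunShortNamer.parse_repr(name) recovers the full parameter dict.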
| 125 | 0 |
from ....configuration_utils import PretrainedConfig
from ....utils import logging
__lowerCAmelCase : Optional[int] = logging.get_logger(__name__)
__lowerCAmelCase : str = {
'Visual-Attention-Network/van-base': (
'https://huggingface.co/Visual-Attention-Network/van-base/blob/main/config.json'
),
}
class UpperCAmelCase_ ( _A ):
'''simple docstring'''
a__ = """van"""
def __init__( self : Dict , UpperCamelCase__ : Any=224 , UpperCamelCase__ : Optional[int]=3 , UpperCamelCase__ : List[str]=[7, 3, 3, 3] , UpperCamelCase__ : Optional[int]=[4, 2, 2, 2] , UpperCamelCase__ : Union[str, Any]=[64, 128, 320, 512] , UpperCamelCase__ : List[str]=[3, 3, 12, 3] , UpperCamelCase__ : Union[str, Any]=[8, 8, 4, 4] , UpperCamelCase__ : Optional[Any]="gelu" , UpperCamelCase__ : Dict=0.02 , UpperCamelCase__ : Union[str, Any]=1E-6 , UpperCamelCase__ : Tuple=1E-2 , UpperCamelCase__ : Any=0.0 , UpperCamelCase__ : Any=0.0 , **UpperCamelCase__ : Any , ) -> Any:
"""simple docstring"""
super().__init__(**UpperCamelCase__ )
__magic_name__ = image_size
__magic_name__ = num_channels
__magic_name__ = patch_sizes
__magic_name__ = strides
__magic_name__ = hidden_sizes
__magic_name__ = depths
__magic_name__ = mlp_ratios
__magic_name__ = hidden_act
__magic_name__ = initializer_range
__magic_name__ = layer_norm_eps
__magic_name__ = layer_scale_init_value
__magic_name__ = drop_path_rate
__magic_name__ = dropout_rate
| 88 |
"""simple docstring"""
import inspect
import unittest
from huggingface_hub import hf_hub_download
from transformers import ConvNextConfig, UperNetConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import UperNetForSemanticSegmentation
from transformers.models.upernet.modeling_upernet import UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class UperNetModelTester:
    def __init__( self , parent , batch_size=13 , image_size=32 , num_channels=3 , num_stages=4 , hidden_sizes=[10, 20, 30, 40] , depths=[2, 2, 3, 2] , is_training=True , use_labels=True , intermediate_size=37 , hidden_act="gelu" , type_sequence_label_size=10 , initializer_range=0.02 , out_features=["stage2", "stage3", "stage4"] , num_labels=3 , scope=None , ) -> None:
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.num_stages = num_stages
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.out_features = out_features
        self.num_labels = num_labels
        self.scope = scope
    def prepare_config_and_inputs( self ):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
        config = self.get_config()
        return config, pixel_values, labels
    def get_backbone_config( self ):
        return ConvNextConfig(
            num_channels=self.num_channels , num_stages=self.num_stages , hidden_sizes=self.hidden_sizes , depths=self.depths , is_training=self.is_training , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , out_features=self.out_features , )
    def get_config( self ):
        return UperNetConfig(
            backbone_config=self.get_backbone_config() , hidden_size=512 , pool_scales=[1, 2, 3, 6] , use_auxiliary_head=True , auxiliary_loss_weight=0.4 , auxiliary_in_channels=40 , auxiliary_channels=256 , auxiliary_num_convs=1 , auxiliary_concat_input=False , loss_ignore_index=255 , num_labels=self.num_labels , )
    def create_and_check_for_semantic_segmentation( self , config , pixel_values , labels ):
        model = UperNetForSemanticSegmentation(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        self.parent.assertEqual(
            result.logits.shape , (self.batch_size, self.num_labels, self.image_size, self.image_size) )
    def prepare_config_and_inputs_for_common( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config ,
            pixel_values ,
            labels ,
        ) = config_and_inputs
        inputs_dict = {'pixel_values': pixel_values}
        return config, inputs_dict
@require_torch
class UperNetModelTest( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
lowerCAmelCase__ : int = (UperNetForSemanticSegmentation,) if is_torch_available() else ()
lowerCAmelCase__ : int = {'image-segmentation': UperNetForSemanticSegmentation} if is_torch_available() else {}
lowerCAmelCase__ : Optional[int] = False
lowerCAmelCase__ : List[Any] = False
lowerCAmelCase__ : Tuple = False
lowerCAmelCase__ : Optional[Any] = False
lowerCAmelCase__ : Union[str, Any] = False
lowerCAmelCase__ : Dict = False
    def setUp( self ) -> None:
        self.model_tester = UperNetModelTester(self )
        self.config_tester = ConfigTester(self , config_class=UperNetConfig , has_text_modality=False , hidden_size=37 )
def snake_case__ ( self ) -> List[Any]:
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def snake_case__ ( self ) -> int:
return
def snake_case__ ( self ) -> List[Any]:
A__ , A__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A__ = model_class(__UpperCAmelCase )
A__ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
A__ = [*signature.parameters.keys()]
A__ = ['pixel_values']
self.assertListEqual(arg_names[:1] ,__UpperCAmelCase )
def snake_case__ ( self ) -> Tuple:
A__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*__UpperCAmelCase )
@unittest.skip(reason='UperNet does not use inputs_embeds' )
def snake_case__ ( self ) -> Optional[int]:
pass
@unittest.skip(reason='UperNet does not support input and output embeddings' )
def snake_case__ ( self ) -> Tuple:
pass
@unittest.skip(reason='UperNet does not have a base model' )
def snake_case__ ( self ) -> List[Any]:
pass
@unittest.skip(reason='UperNet does not have a base model' )
def snake_case__ ( self ) -> Dict:
pass
@require_torch_multi_gpu
@unittest.skip(reason='UperNet has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`' )
def snake_case__ ( self ) -> Any:
pass
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def snake_case__ ( self ) -> Dict:
pass
def snake_case__ ( self ) -> Optional[int]:
def check_hidden_states_output(__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ):
A__ = model_class(__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
with torch.no_grad():
A__ = model(**self._prepare_for_class(__UpperCAmelCase ,__UpperCAmelCase ) )
A__ = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
A__ = self.model_tester.num_stages
self.assertEqual(len(__UpperCAmelCase ) ,expected_num_stages + 1 )
# ConvNext's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) ,[self.model_tester.image_size // 4, self.model_tester.image_size // 4] ,)
A__ , A__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A__ = True
check_hidden_states_output(__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
A__ = True
check_hidden_states_output(__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase )
def snake_case__ ( self ) -> Optional[Any]:
A__ , A__ = self.model_tester.prepare_config_and_inputs_for_common()
A__ = _config_zero_init(__UpperCAmelCase )
A__ = _config_zero_init(configs_no_init.backbone_config )
for model_class in self.all_model_classes:
A__ = model_class(config=__UpperCAmelCase )
for name, param in model.named_parameters():
if param.requires_grad:
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item() ,[0.0, 1.0] ,msg=f'''Parameter {name} of model {model_class} seems not properly initialized''' ,)
@unittest.skip(reason='UperNet does not have tied weights' )
def snake_case__ ( self ) -> str:
pass
@slow
def snake_case__ ( self ) -> List[str]:
for model_name in UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A__ = UperNetForSemanticSegmentation.from_pretrained(__UpperCAmelCase )
self.assertIsNotNone(__UpperCAmelCase )
def prepare_img():
    """simple docstring"""
    filepath = hf_hub_download(
        repo_id='hf-internal-testing/fixtures_ade20k' , repo_type='dataset' , filename='ADE_val_00000001.jpg' )
    image = Image.open(filepath ).convert('RGB' )
    return image
@require_torch
@require_vision
@slow
class UpperCamelCase__( unittest.TestCase ):
    def test_inference_swin_backbone( self ):
        processor = AutoImageProcessor.from_pretrained('openmmlab/upernet-swin-tiny' )
        model = UperNetForSemanticSegmentation.from_pretrained('openmmlab/upernet-swin-tiny' ).to(torch_device )
        image = prepare_img()
        inputs = processor(images=image , return_tensors='pt' ).to(torch_device )
        with torch.no_grad():
            outputs = model(**inputs )
        expected_shape = torch.Size((1, model.config.num_labels, 512, 512) )
        self.assertEqual(outputs.logits.shape , expected_shape )
        expected_slice = torch.tensor(
            [[-7.5958, -7.5958, -7.4302], [-7.5958, -7.5958, -7.4302], [-7.4797, -7.4797, -7.3068]] ).to(torch_device )
        self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3] , expected_slice , atol=1e-4 ) )
    def test_inference_convnext_backbone( self ):
        processor = AutoImageProcessor.from_pretrained('openmmlab/upernet-convnext-tiny' )
        model = UperNetForSemanticSegmentation.from_pretrained('openmmlab/upernet-convnext-tiny' ).to(torch_device )
        image = prepare_img()
        inputs = processor(images=image , return_tensors='pt' ).to(torch_device )
        with torch.no_grad():
            outputs = model(**inputs )
        expected_shape = torch.Size((1, model.config.num_labels, 512, 512) )
        self.assertEqual(outputs.logits.shape , expected_shape )
        expected_slice = torch.tensor(
            [[-8.8110, -8.8110, -8.6521], [-8.8110, -8.8110, -8.6521], [-8.7746, -8.7746, -8.6130]] ).to(torch_device )
        self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3] , expected_slice , atol=1e-4 ) )
| 221 | 0 |
from typing import Dict
import numpy as np
import torch
from . import residue_constants as rc
from .tensor_utils import tensor_tree_map, tree_map
def make_atom14_masks( protein: Dict ) -> Dict:
    """Construct denser atom positions (14 dimensions instead of 37)."""
    restype_atom14_to_atom37_list = []
    restype_atom37_to_atom14_list = []
    restype_atom14_mask_list = []
    for rt in rc.restypes:
        atom_names = rc.restype_name_to_atom14_names[rc.restype_1to3[rt]]
        restype_atom14_to_atom37_list.append([(rc.atom_order[name] if name else 0) for name in atom_names] )
        atom_name_to_idx14 = {name: i for i, name in enumerate(atom_names )}
        restype_atom37_to_atom14_list.append(
            [(atom_name_to_idx14[name] if name in atom_name_to_idx14 else 0) for name in rc.atom_types] )
        restype_atom14_mask_list.append([(1.0 if name else 0.0) for name in atom_names] )
    # Add dummy mapping for restype 'UNK'
    restype_atom14_to_atom37_list.append([0] * 14 )
    restype_atom37_to_atom14_list.append([0] * 37 )
    restype_atom14_mask_list.append([0.0] * 14 )
    restype_atom14_to_atom37 = torch.tensor(
        restype_atom14_to_atom37_list , dtype=torch.int32 , device=protein["aatype"].device , )
    restype_atom37_to_atom14 = torch.tensor(
        restype_atom37_to_atom14_list , dtype=torch.int32 , device=protein["aatype"].device , )
    restype_atom14_mask = torch.tensor(
        restype_atom14_mask_list , dtype=torch.float32 , device=protein["aatype"].device , )
    protein_aatype = protein["aatype"].to(torch.long )
    # create the mapping for (residx, atom14) --> atom37, i.e. an array
    # with shape (num_res, 14) containing the atom37 indices for this protein
    residx_atom14_to_atom37 = restype_atom14_to_atom37[protein_aatype]
    residx_atom14_mask = restype_atom14_mask[protein_aatype]
    protein["atom14_atom_exists"] = residx_atom14_mask
    protein["residx_atom14_to_atom37"] = residx_atom14_to_atom37.long()
    # create the gather indices for mapping back
    residx_atom37_to_atom14 = restype_atom37_to_atom14[protein_aatype]
    protein["residx_atom37_to_atom14"] = residx_atom37_to_atom14.long()
    # create the corresponding mask
    restype_atom37_mask = torch.zeros([21, 37] , dtype=torch.float32 , device=protein["aatype"].device )
    for restype, restype_letter in enumerate(rc.restypes ):
        restype_name = rc.restype_1to3[restype_letter]
        atom_names = rc.residue_atoms[restype_name]
        for atom_name in atom_names:
            atom_type = rc.atom_order[atom_name]
            restype_atom37_mask[restype, atom_type] = 1
    residx_atom37_mask = restype_atom37_mask[protein_aatype]
    protein["atom37_atom_exists"] = residx_atom37_mask
    return protein
def make_atom14_masks_np( batch: Dict ) -> Dict:
    batch = tree_map(lambda n : torch.tensor(n , device=batch["aatype"].device ) , batch , np.ndarray )
    out = tensor_tree_map(lambda t : np.array(t ) , make_atom14_masks(batch ) )
    return out
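

# Shape sketch (illustrative; assumes a toy batch of ten residues of restype 0):
#
#   protein = {"aatype": torch.zeros(10, dtype=torch.long)}
#   protein = make_atom14_masks(protein)
#   protein["residx_atom14_to_atom37"].shape  # torch.Size([10, 14])
#   protein["atom37_atom_exists"].shape       # torch.Size([10, 37])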
| 127 |
import torch
from diffusers import StableDiffusionPipeline
model_id = """path-to-your-trained-model"""
pipe = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.float16).to("""cuda""")
prompt = """A photo of sks dog in a bucket"""
image = pipe(prompt, num_inference_steps=50, guidance_scale=7.5).images[0]
image.save("""dog-bucket.png""")
| 127 | 1 |
import importlib.metadata
import warnings
from copy import deepcopy
from packaging import version
from ..utils import logging
from .import_utils import is_accelerate_available, is_bitsandbytes_available
if is_bitsandbytes_available():
import bitsandbytes as bnb
import torch
import torch.nn as nn
    from ..pytorch_utils import Conv1D
if is_accelerate_available():
from accelerate import init_empty_weights
from accelerate.utils import find_tied_parameters
logger = logging.get_logger(__name__)
def set_module_quantized_tensor_to_device( module , tensor_name , device , value=None , fpaa_statistics=None ):
    # Recurse if needed
    if "." in tensor_name:
        splits = tensor_name.split('.' )
        for split in splits[:-1]:
            new_module = getattr(module , split )
            if new_module is None:
                raise ValueError(F"""{module} has no attribute {split}.""" )
            module = new_module
        tensor_name = splits[-1]
    if tensor_name not in module._parameters and tensor_name not in module._buffers:
        raise ValueError(F"""{module} does not have a parameter or a buffer named {tensor_name}.""" )
    is_buffer = tensor_name in module._buffers
    old_value = getattr(module , tensor_name )
    if old_value.device == torch.device('meta' ) and device not in ["meta", torch.device('meta' )] and value is None:
        raise ValueError(F"""{tensor_name} is on the meta device, we need a `value` to put in on {device}.""" )
    is_4bit = False
    is_8bit = False
    if is_buffer or not is_bitsandbytes_available():
        is_8bit = False
        is_4bit = False
    else:
        is_4bit = hasattr(bnb.nn , 'Params4bit' ) and isinstance(module._parameters[tensor_name] , bnb.nn.Params4bit )
        is_8bit = isinstance(module._parameters[tensor_name] , bnb.nn.Int8Params )
    if is_8bit or is_4bit:
        param = module._parameters[tensor_name]
        if param.device.type != "cuda":
            if value is None:
                new_value = old_value.to(device )
            elif isinstance(value , torch.Tensor ):
                new_value = value.to('cpu' )
                if value.dtype == torch.int8:
                    is_8bit_serializable = version.parse(importlib.metadata.version('bitsandbytes' ) ) > version.parse(
                        '0.37.2' )
                    if not is_8bit_serializable:
                        raise ValueError(
                            'Detected int8 weights but the version of bitsandbytes is not compatible with int8 serialization. '
                            'Make sure to download the latest `bitsandbytes` version. `pip install --upgrade bitsandbytes`.' )
            else:
                new_value = torch.tensor(value , device='cpu' )
            # Support models using `Conv1D` in place of `nn.Linear` (e.g. gpt2) by transposing the weight matrix prior to quantization.
            # Since weights are saved in the correct "orientation", we skip transposing when loading.
            if issubclass(module.source_cls , Conv1D ) and fpaa_statistics is None:
                new_value = new_value.T
            kwargs = old_value.__dict__
            if is_8bit:
                new_value = bnb.nn.Int8Params(new_value , requires_grad=False , **kwargs ).to(device )
            elif is_4bit:
                new_value = bnb.nn.Params4bit(new_value , requires_grad=False , **kwargs ).to(device )
            module._parameters[tensor_name] = new_value
            if fpaa_statistics is not None:
                setattr(module.weight , 'SCB' , fpaa_statistics.to(device ) )
    else:
        if value is None:
            new_value = old_value.to(device )
        elif isinstance(value , torch.Tensor ):
            new_value = value.to(device )
        else:
            new_value = torch.tensor(value , device=device )
        if is_buffer:
            module._buffers[tensor_name] = new_value
        else:
            new_value = nn.Parameter(new_value , requires_grad=old_value.requires_grad )
            module._parameters[tensor_name] = new_value
def _replace_with_bnb_linear( model , modules_to_not_convert=None , current_key_name=None , quantization_config=None , has_been_replaced=False ):
    for name, module in model.named_children():
        if current_key_name is None:
            current_key_name = []
        current_key_name.append(name )
        if (isinstance(module , nn.Linear ) or isinstance(module , Conv1D )) and name not in modules_to_not_convert:
            # Check if the current key is not in the `modules_to_not_convert`
            if not any(key in '.'.join(current_key_name ) for key in modules_to_not_convert ):
                with init_empty_weights():
                    if isinstance(module , Conv1D ):
                        in_features , out_features = module.weight.shape
                    else:
                        in_features = module.in_features
                        out_features = module.out_features
                    if quantization_config.quantization_method() == "llm_int8":
                        model._modules[name] = bnb.nn.Linear8bitLt(
                            in_features , out_features , module.bias is not None , has_fp16_weights=quantization_config.llm_int8_has_fp16_weight , threshold=quantization_config.llm_int8_threshold , )
                        has_been_replaced = True
                    else:
                        if (
                            quantization_config.llm_int8_skip_modules is not None
                            and name in quantization_config.llm_int8_skip_modules
                        ):
                            pass
                        else:
                            model._modules[name] = bnb.nn.Linear4bit(
                                in_features , out_features , module.bias is not None , quantization_config.bnb_4bit_compute_dtype , compress_statistics=quantization_config.bnb_4bit_use_double_quant , quant_type=quantization_config.bnb_4bit_quant_type , )
                            has_been_replaced = True
                    # Store the module class in case we need to transpose the weight later
                    model._modules[name].source_cls = type(module )
                    # Force requires grad to False to avoid unexpected errors
                    model._modules[name].requires_grad_(False )
        if len(list(module.children() ) ) > 0:
            _ , has_been_replaced = _replace_with_bnb_linear(
                module , modules_to_not_convert , current_key_name , quantization_config , has_been_replaced=has_been_replaced , )
        # Remove the last key for recursion
        current_key_name.pop(-1 )
    return model, has_been_replaced
def replace_with_bnb_linear( model , modules_to_not_convert=None , current_key_name=None , quantization_config=None ):
    modules_to_not_convert = ['lm_head'] if modules_to_not_convert is None else modules_to_not_convert
    model , has_been_replaced = _replace_with_bnb_linear(
        model , modules_to_not_convert , current_key_name , quantization_config )
    if not has_been_replaced:
        logger.warning(
            'You are loading your model in 8bit or 4bit but no linear modules were found in your model.'
            ' Please double check your model architecture, or submit an issue on github if you think this is'
            ' a bug.' )
    return model
def replace_8bit_linear( *args , **kwargs ):
    warnings.warn(
        '`replace_8bit_linear` will be deprecated in a future version, please use `replace_with_bnb_linear` instead' , FutureWarning , )
    return replace_with_bnb_linear(*args , **kwargs )
def set_module_8bit_tensor_to_device( *args , **kwargs ):
    warnings.warn(
        '`set_module_8bit_tensor_to_device` will be deprecated in a future version, please use `set_module_quantized_tensor_to_device` instead' , FutureWarning , )
    return set_module_quantized_tensor_to_device(*args , **kwargs )
def get_keys_to_not_convert( model ):
    tied_model = deepcopy(model ) # this has 0 cost since it is done inside `init_empty_weights` context manager`
    tied_model.tie_weights()
    tied_params = find_tied_parameters(tied_model )
    # For compatibility with Accelerate < 0.18
    if isinstance(tied_params , dict ):
        tied_keys = sum(list(tied_params.values() ) , [] ) + list(tied_params.keys() )
    else:
        tied_keys = sum(tied_params , [] )
    has_tied_params = len(tied_keys ) > 0
    # Check if it is a base model
    is_base_model = not hasattr(model , model.base_model_prefix )
    # Ignore this for base models (BertModel, GPT2Model, etc.)
    if (not has_tied_params) and is_base_model:
        return []
    # otherwise they have an attached head
    list_modules = list(model.named_children() )
    list_last_module = [list_modules[-1][0]]
    # add last module together with tied weights
    intersection = set(list_last_module ) - set(tied_keys )
    list_untouched = list(set(tied_keys ) ) + list(intersection )
    # remove ".weight" from the keys
    names_to_remove = ['.weight', '.bias']
    filtered_module_names = []
    for name in list_untouched:
        for name_to_remove in names_to_remove:
            if name_to_remove in name:
                name = name.replace(name_to_remove , '' )
        filtered_module_names.append(name )
    return filtered_module_names
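

# A minimal end-to-end sketch (hypothetical `model`; `BitsAndBytesConfig` is the real
# transformers config class, but the flow below only approximates what
# `from_pretrained(load_in_8bit=True)` does with these helpers internally):
#
#   from transformers.utils.quantization_config import BitsAndBytesConfig
#   quant_config = BitsAndBytesConfig(load_in_8bit=True)
#   modules_to_skip = get_keys_to_not_convert(model)
#   model = replace_with_bnb_linear(
#       model, modules_to_not_convert=modules_to_skip, quantization_config=quant_config)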
| 280 |
from __future__ import annotations
def maximum_non_adjacent_sum( nums: list[int] ) -> int:
    if not nums:
        return 0
    max_including = nums[0]
    max_excluding = 0
    for num in nums[1:]:
        max_including , max_excluding = (
            max_excluding + num,
            max(max_including , max_excluding ),
        )
    return max(max_including , max_excluding )
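

# Worked example (assumed input, not from the original file): for [3, 2, 7, 10] the
# recurrence keeps (include, exclude) pairs (3, 0) -> (2, 3) -> (10, 3) -> (13, 10),
# so the maximum sum of non-adjacent elements is 3 + 10 = 13.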
if __name__ == "__main__":
import doctest
doctest.testmod()
| 280 | 1 |
"""simple docstring"""
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import sys
import warnings
from os.path import abspath, dirname, join
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
A_ : int =abspath(join(dirname(dirname(__file__)), """src"""))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action="""ignore""", category=FutureWarning)
def pytest_addoption( parser )-> None:
    from diffusers.utils.testing_utils import pytest_addoption_shared
    pytest_addoption_shared(parser )
def pytest_terminal_summary( terminalreporter )-> None:
    from diffusers.utils.testing_utils import pytest_terminal_summary_main
    make_reports = terminalreporter.config.getoption('--make-reports' )
    if make_reports:
        pytest_terminal_summary_main(terminalreporter , id=make_reports )
| 80 |
"""simple docstring"""
import argparse
import torch
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import download_from_original_stable_diffusion_ckpt
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"""--checkpoint_path""", default=None, type=str, required=True, help="""Path to the checkpoint to convert."""
)
# !wget https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml
parser.add_argument(
"""--original_config_file""",
default=None,
type=str,
help="""The YAML config file corresponding to the original architecture.""",
)
parser.add_argument(
"""--num_in_channels""",
default=None,
type=int,
help="""The number of input channels. If `None` number of input channels will be automatically inferred.""",
)
parser.add_argument(
"""--scheduler_type""",
default="""pndm""",
type=str,
help="""Type of scheduler to use. Should be one of ['pndm', 'lms', 'ddim', 'euler', 'euler-ancestral', 'dpm']""",
)
parser.add_argument(
"""--pipeline_type""",
default=None,
type=str,
help=(
"""The pipeline type. One of 'FrozenOpenCLIPEmbedder', 'FrozenCLIPEmbedder', 'PaintByExample'"""
""". If `None` pipeline will be automatically inferred."""
),
)
parser.add_argument(
"""--image_size""",
default=None,
type=int,
help=(
"""The image size that the model was trained on. Use 512 for Stable Diffusion v1.X and Stable Siffusion v2"""
""" Base. Use 768 for Stable Diffusion v2."""
),
)
parser.add_argument(
"""--prediction_type""",
default=None,
type=str,
help=(
"""The prediction type that the model was trained on. Use 'epsilon' for Stable Diffusion v1.X and Stable"""
""" Diffusion v2 Base. Use 'v_prediction' for Stable Diffusion v2."""
),
)
parser.add_argument(
"""--extract_ema""",
action="""store_true""",
help=(
"""Only relevant for checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights"""
""" or not. Defaults to `False`. Add `--extract_ema` to extract the EMA weights. EMA weights usually yield"""
""" higher quality images for inference. Non-EMA weights are usually better to continue fine-tuning."""
),
)
parser.add_argument(
"""--upcast_attention""",
action="""store_true""",
help=(
"""Whether the attention computation should always be upcasted. This is necessary when running stable"""
""" diffusion 2.1."""
),
)
parser.add_argument(
"""--from_safetensors""",
action="""store_true""",
help="""If `--checkpoint_path` is in `safetensors` format, load checkpoint with safetensors instead of PyTorch.""",
)
parser.add_argument(
"""--to_safetensors""",
action="""store_true""",
help="""Whether to store pipeline in safetensors format or not.""",
)
parser.add_argument("""--dump_path""", default=None, type=str, required=True, help="""Path to the output model.""")
parser.add_argument("""--device""", type=str, help="""Device to use (e.g. cpu, cuda:0, cuda:1, etc.)""")
parser.add_argument(
"""--stable_unclip""",
type=str,
default=None,
required=False,
help="""Set if this is a stable unCLIP model. One of 'txt2img' or 'img2img'.""",
)
parser.add_argument(
"""--stable_unclip_prior""",
type=str,
default=None,
required=False,
help="""Set if this is a stable unCLIP txt2img model. Selects which prior to use. If `--stable_unclip` is set to `txt2img`, the karlo prior (https://huggingface.co/kakaobrain/karlo-v1-alpha/tree/main/prior) is selected by default.""",
)
parser.add_argument(
"""--clip_stats_path""",
type=str,
help="""Path to the clip stats file. Only required if the stable unclip model's config specifies `model.params.noise_aug_config.params.clip_stats_path`.""",
required=False,
)
parser.add_argument(
"""--controlnet""", action="""store_true""", default=None, help="""Set flag if this is a controlnet checkpoint."""
)
parser.add_argument("""--half""", action="""store_true""", help="""Save weights in half precision.""")
parser.add_argument(
"""--vae_path""",
type=str,
default=None,
required=False,
help="""Set to a path, hub id to an already converted vae to not convert it again.""",
)
    args = parser.parse_args()
    pipe = download_from_original_stable_diffusion_ckpt(
checkpoint_path=args.checkpoint_path,
original_config_file=args.original_config_file,
image_size=args.image_size,
prediction_type=args.prediction_type,
model_type=args.pipeline_type,
extract_ema=args.extract_ema,
scheduler_type=args.scheduler_type,
num_in_channels=args.num_in_channels,
upcast_attention=args.upcast_attention,
from_safetensors=args.from_safetensors,
device=args.device,
stable_unclip=args.stable_unclip,
stable_unclip_prior=args.stable_unclip_prior,
clip_stats_path=args.clip_stats_path,
controlnet=args.controlnet,
vae_path=args.vae_path,
)
if args.half:
        pipe.to(torch_dtype=torch.float16)
if args.controlnet:
# only save the controlnet model
pipe.controlnet.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
else:
pipe.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
| 80 | 1 |
'''simple docstring'''
from pathlib import Path
import fire
def minify(src_path: str , dest_dir: str , n: int ) -> None:
    """Write the first ``n`` lines of every file under ``src_path`` into ``dest_dir``."""
    src_dir = Path(src_path )
    dest = Path(dest_dir )
    dest.mkdir(exist_ok=True )
    for path in src_dir.iterdir():
        new = [x.rstrip() for x in list(path.open().readlines() )][:n]
        dest_path = dest.joinpath(path.name )
        print(dest_path )
        dest_path.open("""w""" ).write("""\n""".join(new ) )
if __name__ == "__main__":
fire.Fire(minify)
| 41 |
'''simple docstring'''
import argparse
import json
import logging
import os
import sys
from unittest.mock import patch
from transformers.testing_utils import TestCasePlus, get_gpu_count, slow
SRC_DIRS = [
os.path.join(os.path.dirname(__file__), dirname)
for dirname in [
'''text-classification''',
'''language-modeling''',
'''summarization''',
'''token-classification''',
'''question-answering''',
]
]
sys.path.extend(SRC_DIRS)
if SRC_DIRS is not None:
import run_clm_flax
import run_flax_glue
import run_flax_ner
import run_mlm_flax
import run_qa
import run_summarization_flax
import run_ta_mlm_flax
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()
def get_setup_file():
    parser = argparse.ArgumentParser()
    parser.add_argument("-f" )
    args = parser.parse_args()
    return args.f
def get_results( output_dir , split="eval" ):
    path = os.path.join(output_dir , F"{split}_results.json" )
    if os.path.exists(path ):
        with open(path , "r" ) as f:
            return json.load(f )
    raise ValueError(F"can't find {path}" )
stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class A ( TestCasePlus ):
'''simple docstring'''
def a_ (self ) -> str:
__UpperCamelCase : Any = self.get_auto_remove_tmp_dir()
__UpperCamelCase : List[str] = f"\n run_glue.py\n --model_name_or_path distilbert-base-uncased\n --output_dir {tmp_dir}\n --train_file ./tests/fixtures/tests_samples/MRPC/train.csv\n --validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --learning_rate=1e-4\n --eval_steps=2\n --warmup_steps=2\n --seed=42\n --max_seq_length=128\n ".split()
with patch.object(_UpperCAmelCase , "argv" , _UpperCAmelCase ):
run_flax_glue.main()
__UpperCamelCase : int = get_results(_UpperCAmelCase )
self.assertGreaterEqual(result["eval_accuracy"] , 0.75 )
@slow
def a_ (self ) -> Tuple:
__UpperCamelCase : List[Any] = self.get_auto_remove_tmp_dir()
__UpperCamelCase : Any = f"\n run_clm_flax.py\n --model_name_or_path distilgpt2\n --train_file ./tests/fixtures/sample_text.txt\n --validation_file ./tests/fixtures/sample_text.txt\n --do_train\n --do_eval\n --block_size 128\n --per_device_train_batch_size 4\n --per_device_eval_batch_size 4\n --num_train_epochs 2\n --logging_steps 2 --eval_steps 2\n --output_dir {tmp_dir}\n --overwrite_output_dir\n ".split()
with patch.object(_UpperCAmelCase , "argv" , _UpperCAmelCase ):
run_clm_flax.main()
__UpperCamelCase : Optional[int] = get_results(_UpperCAmelCase )
self.assertLess(result["eval_perplexity"] , 1_0_0 )
@slow
def a_ (self ) -> str:
__UpperCamelCase : Any = self.get_auto_remove_tmp_dir()
__UpperCamelCase : Tuple = f"\n run_summarization.py\n --model_name_or_path t5-small\n --train_file tests/fixtures/tests_samples/xsum/sample.json\n --validation_file tests/fixtures/tests_samples/xsum/sample.json\n --test_file tests/fixtures/tests_samples/xsum/sample.json\n --output_dir {tmp_dir}\n --overwrite_output_dir\n --num_train_epochs=3\n --warmup_steps=8\n --do_train\n --do_eval\n --do_predict\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --predict_with_generate\n ".split()
with patch.object(_UpperCAmelCase , "argv" , _UpperCAmelCase ):
run_summarization_flax.main()
__UpperCamelCase : Tuple = get_results(_UpperCAmelCase , split="test" )
self.assertGreaterEqual(result["test_rouge1"] , 1_0 )
self.assertGreaterEqual(result["test_rouge2"] , 2 )
self.assertGreaterEqual(result["test_rougeL"] , 7 )
self.assertGreaterEqual(result["test_rougeLsum"] , 7 )
@slow
def a_ (self ) -> int:
__UpperCamelCase : int = self.get_auto_remove_tmp_dir()
__UpperCamelCase : str = f"\n run_mlm.py\n --model_name_or_path distilroberta-base\n --train_file ./tests/fixtures/sample_text.txt\n --validation_file ./tests/fixtures/sample_text.txt\n --output_dir {tmp_dir}\n --overwrite_output_dir\n --max_seq_length 128\n --per_device_train_batch_size 4\n --per_device_eval_batch_size 4\n --logging_steps 2 --eval_steps 2\n --do_train\n --do_eval\n --num_train_epochs=1\n ".split()
with patch.object(_UpperCAmelCase , "argv" , _UpperCAmelCase ):
run_mlm_flax.main()
__UpperCamelCase : Optional[Any] = get_results(_UpperCAmelCase )
self.assertLess(result["eval_perplexity"] , 4_2 )
@slow
def a_ (self ) -> Dict:
__UpperCamelCase : Dict = self.get_auto_remove_tmp_dir()
__UpperCamelCase : Tuple = f"\n run_t5_mlm_flax.py\n --model_name_or_path t5-small\n --train_file ./tests/fixtures/sample_text.txt\n --validation_file ./tests/fixtures/sample_text.txt\n --do_train\n --do_eval\n --max_seq_length 128\n --per_device_train_batch_size 4\n --per_device_eval_batch_size 4\n --num_train_epochs 2\n --logging_steps 2 --eval_steps 2\n --output_dir {tmp_dir}\n --overwrite_output_dir\n ".split()
with patch.object(_UpperCAmelCase , "argv" , _UpperCAmelCase ):
run_ta_mlm_flax.main()
__UpperCamelCase : Tuple = get_results(_UpperCAmelCase )
self.assertGreaterEqual(result["eval_accuracy"] , 0.42 )
@slow
def a_ (self ) -> Union[str, Any]:
# with so little data distributed training needs more epochs to get the score on par with 0/1 gpu
__UpperCamelCase : Union[str, Any] = 7 if get_gpu_count() > 1 else 2
__UpperCamelCase : Optional[Any] = self.get_auto_remove_tmp_dir()
__UpperCamelCase : Optional[Any] = f"\n run_flax_ner.py\n --model_name_or_path bert-base-uncased\n --train_file tests/fixtures/tests_samples/conll/sample.json\n --validation_file tests/fixtures/tests_samples/conll/sample.json\n --output_dir {tmp_dir}\n --overwrite_output_dir\n --do_train\n --do_eval\n --warmup_steps=2\n --learning_rate=2e-4\n --logging_steps 2 --eval_steps 2\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=2\n --num_train_epochs={epochs}\n --seed 7\n ".split()
with patch.object(_UpperCAmelCase , "argv" , _UpperCAmelCase ):
run_flax_ner.main()
__UpperCamelCase : int = get_results(_UpperCAmelCase )
self.assertGreaterEqual(result["eval_accuracy"] , 0.75 )
self.assertGreaterEqual(result["eval_f1"] , 0.3 )
@slow
def a_ (self ) -> List[Any]:
__UpperCamelCase : Optional[Any] = self.get_auto_remove_tmp_dir()
__UpperCamelCase : Dict = f"\n run_qa.py\n --model_name_or_path bert-base-uncased\n --version_2_with_negative\n --train_file tests/fixtures/tests_samples/SQUAD/sample.json\n --validation_file tests/fixtures/tests_samples/SQUAD/sample.json\n --output_dir {tmp_dir}\n --overwrite_output_dir\n --num_train_epochs=3\n --warmup_steps=2\n --do_train\n --do_eval\n --logging_steps 2 --eval_steps 2\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n ".split()
with patch.object(_UpperCAmelCase , "argv" , _UpperCAmelCase ):
run_qa.main()
__UpperCamelCase : List[Any] = get_results(_UpperCAmelCase )
self.assertGreaterEqual(result["eval_f1"] , 3_0 )
self.assertGreaterEqual(result["eval_exact"] , 3_0 )
| 298 | 0 |
def is_even( number : int ) -> bool:
    return number & 1 == 0
if __name__ == "__main__":
import doctest
doctest.testmod()
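    # Illustrative checks (assumed examples, not from the original file): an even
    # number has a zero low bit.
    assert is_even(4 )
    assert not is_even(7 )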
| 356 |
# Algorithm for the pigeonhole sorting
def pigeonhole_sort( a : list ) -> None:
    min_val = min(a ) # min() finds the minimum value
    max_val = max(a ) # max() finds the maximum value
    size = max_val - min_val + 1 # size is difference of max and min values plus one
    # list of pigeonholes of size equal to the variable size
    holes = [0] * size
    # Populate the pigeonholes.
    for x in a:
        assert isinstance(x , int ), "integers only please"
        holes[x - min_val] += 1
    # Putting the elements back into the array in an order.
    i = 0
    for count in range(size ):
        while holes[count] > 0:
            holes[count] -= 1
            a[i] = count + min_val
            i += 1
def main() -> None:
    a = [8, 3, 2, 7, 4, 6, 8]
    pigeonhole_sort(a )
    print('''Sorted order is:''' , ''' '''.join(str(x ) for x in a ) )
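

# Worked example (assumed trace, matching the demo list above): for [8, 3, 2, 7, 4, 6, 8]
# the holes span values 2..8 (size 7); replaying the counts yields [2, 3, 4, 6, 7, 8, 8].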
if __name__ == "__main__":
main()
| 238 | 0 |
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_yolos import YolosImageProcessor
logger = logging.get_logger(__name__)
class __SCREAMING_SNAKE_CASE ( YolosImageProcessor ):
'''simple docstring'''
    def __init__( self, *args, **kwargs )-> None:
        warnings.warn(
            '''The class YolosFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'''
            ''' use YolosImageProcessor instead.''', FutureWarning, )
        super().__init__(*args, **kwargs )
| 238 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
snake_case_ : Any = logging.get_logger(__name__)
snake_case_ : Dict = {
"weiweishi/roc-bert-base-zh": "https://huggingface.co/weiweishi/roc-bert-base-zh/resolve/main/config.json",
}
class __a (PretrainedConfig ):
    model_type = "roc_bert"
    def __init__( self , vocab_size=30522 , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3072 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1E-12 , use_cache=True , pad_token_id=0 , position_embedding_type="absolute" , classifier_dropout=None , enable_pronunciation=True , enable_shape=True , pronunciation_embed_dim=768 , pronunciation_vocab_size=910 , shape_embed_dim=512 , shape_vocab_size=24858 , concat_input=True , **kwargs , ) -> None:
        """simple docstring"""
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.enable_pronunciation = enable_pronunciation
        self.enable_shape = enable_shape
        self.pronunciation_embed_dim = pronunciation_embed_dim
        self.pronunciation_vocab_size = pronunciation_vocab_size
        self.shape_embed_dim = shape_embed_dim
        self.shape_vocab_size = shape_vocab_size
        self.concat_input = concat_input
        self.position_embedding_type = position_embedding_type
        self.classifier_dropout = classifier_dropout
        super().__init__(pad_token_id=pad_token_id , **kwargs )
| 125 | 0 |
import gc
import math
import unittest
import torch
from diffusers import UNetaDModel
from diffusers.utils import floats_tensor, logging, slow, torch_all_close, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
lowercase : str = logging.get_logger(__name__)
enable_full_determinism()
class UNetaDModelTests( ModelTesterMixin , UNetTesterMixin , unittest.TestCase ):
    '''simple docstring'''
    model_class = UNetaDModel
    main_input_name = 'sample'
@property
def _lowerCAmelCase ( self ) -> List[Any]:
snake_case_ : Tuple = 4
snake_case_ : Optional[Any] = 3
snake_case_ : Any = (32, 32)
snake_case_ : Union[str, Any] = floats_tensor((batch_size, num_channels) + sizes ).to(__lowerCamelCase )
snake_case_ : Union[str, Any] = torch.tensor([10] ).to(__lowerCamelCase )
return {"sample": noise, "timestep": time_step}
@property
def _lowerCAmelCase ( self ) -> Dict:
return (3, 32, 32)
@property
def _lowerCAmelCase ( self ) -> Dict:
return (3, 32, 32)
def _lowerCAmelCase ( self ) -> int:
snake_case_ : int = {
"block_out_channels": (32, 64),
"down_block_types": ("DownBlock2D", "AttnDownBlock2D"),
"up_block_types": ("AttnUpBlock2D", "UpBlock2D"),
"attention_head_dim": 3,
"out_channels": 3,
"in_channels": 3,
"layers_per_block": 2,
"sample_size": 32,
}
snake_case_ : List[str] = self.dummy_input
return init_dict, inputs_dict
class UNetLDMModelTests( ModelTesterMixin , UNetTesterMixin , unittest.TestCase ):
    '''simple docstring'''
    model_class = UNetaDModel
    main_input_name = 'sample'
@property
def _lowerCAmelCase ( self ) -> Dict:
snake_case_ : List[Any] = 4
snake_case_ : str = 4
snake_case_ : Tuple = (32, 32)
snake_case_ : List[str] = floats_tensor((batch_size, num_channels) + sizes ).to(__lowerCamelCase )
snake_case_ : List[str] = torch.tensor([10] ).to(__lowerCamelCase )
return {"sample": noise, "timestep": time_step}
@property
def _lowerCAmelCase ( self ) -> Optional[int]:
return (4, 32, 32)
@property
def _lowerCAmelCase ( self ) -> Union[str, Any]:
return (4, 32, 32)
def _lowerCAmelCase ( self ) -> List[Any]:
snake_case_ : Dict = {
"sample_size": 32,
"in_channels": 4,
"out_channels": 4,
"layers_per_block": 2,
"block_out_channels": (32, 64),
"attention_head_dim": 32,
"down_block_types": ("DownBlock2D", "DownBlock2D"),
"up_block_types": ("UpBlock2D", "UpBlock2D"),
}
snake_case_ : Dict = self.dummy_input
return init_dict, inputs_dict
def _lowerCAmelCase ( self ) -> str:
snake_case_ , snake_case_ : Tuple = UNetaDModel.from_pretrained("fusing/unet-ldm-dummy-update" , output_loading_info=__lowerCamelCase )
self.assertIsNotNone(__lowerCamelCase )
self.assertEqual(len(loading_info["missing_keys"] ) , 0 )
model.to(__lowerCamelCase )
snake_case_ : Any = model(**self.dummy_input ).sample
assert image is not None, "Make sure output is not None"
@unittest.skipIf(torch_device != "cuda" , "This test is supposed to run on GPU" )
def _lowerCAmelCase ( self ) -> List[Any]:
snake_case_ , snake_case_ : Any = UNetaDModel.from_pretrained("fusing/unet-ldm-dummy-update" , output_loading_info=__lowerCamelCase )
model.to(__lowerCamelCase )
snake_case_ : Optional[Any] = model(**self.dummy_input ).sample
assert image is not None, "Make sure output is not None"
@unittest.skipIf(torch_device != "cuda" , "This test is supposed to run on GPU" )
def _lowerCAmelCase ( self ) -> List[str]:
# by defautl model loading will use accelerate as `low_cpu_mem_usage=True`
snake_case_ , snake_case_ : str = UNetaDModel.from_pretrained("fusing/unet-ldm-dummy-update" , output_loading_info=__lowerCamelCase )
model_accelerate.to(__lowerCamelCase )
model_accelerate.eval()
snake_case_ : List[Any] = torch.randn(
1 , model_accelerate.config.in_channels , model_accelerate.config.sample_size , model_accelerate.config.sample_size , generator=torch.manual_seed(0 ) , )
snake_case_ : int = noise.to(__lowerCamelCase )
snake_case_ : str = torch.tensor([10] * noise.shape[0] ).to(__lowerCamelCase )
snake_case_ : Any = model_accelerate(__lowerCamelCase , __lowerCamelCase )["sample"]
# two models don't need to stay in the device at the same time
del model_accelerate
torch.cuda.empty_cache()
gc.collect()
snake_case_ , snake_case_ : Union[str, Any] = UNetaDModel.from_pretrained(
"fusing/unet-ldm-dummy-update" , output_loading_info=__lowerCamelCase , low_cpu_mem_usage=__lowerCamelCase )
model_normal_load.to(__lowerCamelCase )
model_normal_load.eval()
snake_case_ : str = model_normal_load(__lowerCamelCase , __lowerCamelCase )["sample"]
assert torch_all_close(__lowerCamelCase , __lowerCamelCase , rtol=1e-3 )
def _lowerCAmelCase ( self ) -> Any:
snake_case_ : Union[str, Any] = UNetaDModel.from_pretrained("fusing/unet-ldm-dummy-update" )
model.eval()
model.to(__lowerCamelCase )
snake_case_ : Optional[int] = torch.randn(
1 , model.config.in_channels , model.config.sample_size , model.config.sample_size , generator=torch.manual_seed(0 ) , )
snake_case_ : int = noise.to(__lowerCamelCase )
snake_case_ : Union[str, Any] = torch.tensor([10] * noise.shape[0] ).to(__lowerCamelCase )
with torch.no_grad():
snake_case_ : str = model(__lowerCamelCase , __lowerCamelCase ).sample
snake_case_ : Tuple = output[0, -1, -3:, -3:].flatten().cpu()
# fmt: off
snake_case_ : Any = torch.tensor([-13.3258, -20.1100, -15.9873, -17.6617, -23.0596, -17.9419, -13.3675, -16.1889, -12.3800] )
# fmt: on
self.assertTrue(torch_all_close(__lowerCamelCase , __lowerCamelCase , rtol=1e-3 ) )
class NCSNppModelTests( ModelTesterMixin , UNetTesterMixin , unittest.TestCase ):
    '''simple docstring'''
    model_class = UNetaDModel
    main_input_name = 'sample'
@property
def _lowerCAmelCase ( self , _SCREAMING_SNAKE_CASE=(32, 32) ) -> Tuple:
snake_case_ : List[str] = 4
snake_case_ : Union[str, Any] = 3
snake_case_ : Tuple = floats_tensor((batch_size, num_channels) + sizes ).to(__lowerCamelCase )
snake_case_ : Tuple = torch.tensor(batch_size * [10] ).to(dtype=torch.intaa , device=__lowerCamelCase )
return {"sample": noise, "timestep": time_step}
@property
def _lowerCAmelCase ( self ) -> List[str]:
return (3, 32, 32)
@property
def _lowerCAmelCase ( self ) -> List[Any]:
return (3, 32, 32)
def _lowerCAmelCase ( self ) -> List[str]:
snake_case_ : Any = {
"block_out_channels": [32, 64, 64, 64],
"in_channels": 3,
"layers_per_block": 1,
"out_channels": 3,
"time_embedding_type": "fourier",
"norm_eps": 1e-6,
"mid_block_scale_factor": math.sqrt(2.0 ),
"norm_num_groups": None,
"down_block_types": [
"SkipDownBlock2D",
"AttnSkipDownBlock2D",
"SkipDownBlock2D",
"SkipDownBlock2D",
],
"up_block_types": [
"SkipUpBlock2D",
"SkipUpBlock2D",
"AttnSkipUpBlock2D",
"SkipUpBlock2D",
],
}
snake_case_ : Any = self.dummy_input
return init_dict, inputs_dict
@slow
def _lowerCAmelCase ( self ) -> Any:
snake_case_ , snake_case_ : Any = UNetaDModel.from_pretrained("google/ncsnpp-celebahq-256" , output_loading_info=__lowerCamelCase )
self.assertIsNotNone(__lowerCamelCase )
self.assertEqual(len(loading_info["missing_keys"] ) , 0 )
model.to(__lowerCamelCase )
snake_case_ : str = self.dummy_input
snake_case_ : str = floats_tensor((4, 3) + (256, 256) ).to(__lowerCamelCase )
snake_case_ : Any = noise
snake_case_ : Optional[Any] = model(**__lowerCamelCase )
assert image is not None, "Make sure output is not None"
@slow
def _lowerCAmelCase ( self ) -> int:
snake_case_ : Dict = UNetaDModel.from_pretrained("google/ncsnpp-celebahq-256" )
model.to(__lowerCamelCase )
snake_case_ : Dict = 4
snake_case_ : List[str] = 3
snake_case_ : Union[str, Any] = (256, 256)
snake_case_ : Tuple = torch.ones((batch_size, num_channels) + sizes ).to(__lowerCamelCase )
snake_case_ : str = torch.tensor(batch_size * [1e-4] ).to(__lowerCamelCase )
with torch.no_grad():
snake_case_ : Tuple = model(__lowerCamelCase , __lowerCamelCase ).sample
snake_case_ : int = output[0, -3:, -3:, -1].flatten().cpu()
# fmt: off
snake_case_ : Any = torch.tensor([-4842.8691, -6499.6631, -3800.1953, -7978.2686, -1_0980.7129, -2_0028.8535, 8148.2822, 2342.2905, 567.7608] )
# fmt: on
self.assertTrue(torch_all_close(__lowerCamelCase , __lowerCamelCase , rtol=1e-2 ) )
def _lowerCAmelCase ( self ) -> Any:
snake_case_ : Dict = UNetaDModel.from_pretrained("fusing/ncsnpp-ffhq-ve-dummy-update" )
model.to(__lowerCamelCase )
snake_case_ : str = 4
snake_case_ : Optional[Any] = 3
snake_case_ : List[str] = (32, 32)
snake_case_ : int = torch.ones((batch_size, num_channels) + sizes ).to(__lowerCamelCase )
snake_case_ : List[Any] = torch.tensor(batch_size * [1e-4] ).to(__lowerCamelCase )
with torch.no_grad():
snake_case_ : Union[str, Any] = model(__lowerCamelCase , __lowerCamelCase ).sample
snake_case_ : List[Any] = output[0, -3:, -3:, -1].flatten().cpu()
# fmt: off
snake_case_ : List[Any] = torch.tensor([-0.0325, -0.0900, -0.0869, -0.0332, -0.0725, -0.0270, -0.0101, 0.0227, 0.0256] )
# fmt: on
self.assertTrue(torch_all_close(__lowerCamelCase , __lowerCamelCase , rtol=1e-2 ) )
def _lowerCAmelCase ( self ) -> Any:
# not required for this model
pass | 366 |
def matching_min_vertex_cover( graph: dict ) -> set:
    chosen_vertices = set()
    # edges = list of graph's edges
    edges = get_edges(graph )
    # While there are still elements in edges list, take an arbitrary edge
    # (from_node, to_node) and add his extremity to chosen_vertices and then
    # remove all arcs adjacent to the from_node and to_node
    while edges:
        from_node , to_node = edges.pop()
        chosen_vertices.add(from_node )
        chosen_vertices.add(to_node )
        for edge in edges.copy():
            if from_node in edge or to_node in edge:
                edges.discard(edge )
    return chosen_vertices
def get_edges( graph: dict ) -> set:
    edges = set()
    for from_node, to_nodes in graph.items():
        for to_node in to_nodes:
            edges.add((from_node, to_node) )
    return edges
if __name__ == "__main__":
import doctest
doctest.testmod()
# graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
# print(f"Matching vertex cover:\n{matching_min_vertex_cover(graph)}")
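# One possible run (assumed; the exact cover depends on the order edges are popped
# from the set): matching_min_vertex_cover(graph) -> {0, 1, 2, 4}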
| 36 | 0 |
from __future__ import annotations
from decimal import Decimal
from math import * # noqa: F403
from sympy import diff
def newton_raphson(func: str , a: float , precision: float = 10**-10 ):
    """Finds a root of ``func`` near the initial guess ``a`` via Newton-Raphson."""
    x = a
    while True:
        x = Decimal(x ) - (
            Decimal(eval(func ) ) / Decimal(eval(str(diff(func ) ) ) ) # noqa: S307
        )
        # This number dictates the accuracy of the answer
        if abs(eval(func ) ) < precision: # noqa: S307
            return float(x )
# Let's Execute
if __name__ == "__main__":
# Find root of trigonometric function
# Find value of pi
print(f'''The root of sin(x) = 0 is {newton_raphson('sin(x)', 2)}''')
# Find root of polynomial
print(f'''The root of x**2 - 5*x + 2 = 0 is {newton_raphson('x**2 - 5*x + 2', 0.4)}''')
# Find Square Root of 5
print(f'''The root of log(x) - 1 = 0 is {newton_raphson('log(x) - 1', 2)}''')
# Exponential Roots
print(f'''The root of exp(x) - 1 = 0 is {newton_raphson('exp(x) - 1', 0)}''')
| 127 |
from dataclasses import dataclass, field
from typing import Tuple
from ..utils import cached_property, is_torch_available, is_torch_tpu_available, logging, requires_backends
from .benchmark_args_utils import BenchmarkArguments
if is_torch_available():
import torch
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
logger = logging.get_logger(__name__)
@dataclass
class A__ ( BenchmarkArguments ):
    """simple docstring"""
    deprecated_args = [
'no_inference',
'no_cuda',
'no_tpu',
'no_speed',
'no_memory',
'no_env_print',
'no_multi_process',
]
    def __init__( self , **kwargs ):
        for deprecated_arg in self.deprecated_args:
            if deprecated_arg in kwargs:
                positive_arg = deprecated_arg[3:]
                setattr(self , positive_arg , not kwargs.pop(deprecated_arg ) )
                logger.warning(
                    F'''{deprecated_arg} is depreciated. Please use --no_{positive_arg} or'''
                    F''' {positive_arg}={kwargs[positive_arg]}''' )
        self.torchscript = kwargs.pop('''torchscript''' , self.torchscript )
        self.torch_xla_tpu_print_metrics = kwargs.pop('''torch_xla_tpu_print_metrics''' , self.torch_xla_tpu_print_metrics )
        self.fpaa_opt_level = kwargs.pop('''fp16_opt_level''' , self.fpaa_opt_level )
        super().__init__(**kwargs )
    torchscript = field(default=False , metadata={'help': 'Trace the models using torchscript'} )
    torch_xla_tpu_print_metrics = field(default=False , metadata={'help': 'Print Xla/PyTorch tpu metrics'} )
    fpaa_opt_level = field(
        default='O1' , metadata={
            'help': (
                'For fp16: Apex AMP optimization level selected in [\'O0\', \'O1\', \'O2\', and \'O3\']. '
                'See details at https://nvidia.github.io/apex/amp.html'
            )
        } , )
    @cached_property
    def _setup_devices( self ) -> Tuple["torch.device", int]:
        requires_backends(self , ['''torch'''] )
        logger.info('''PyTorch: setting up devices''' )
        if not self.cuda:
            device = torch.device('''cpu''' )
            n_gpu = 0
        elif is_torch_tpu_available():
            device = xm.xla_device()
            n_gpu = 0
        else:
            device = torch.device('''cuda''' if torch.cuda.is_available() else '''cpu''' )
            n_gpu = torch.cuda.device_count()
        return device, n_gpu
    @property
    def is_tpu( self ):
        return is_torch_tpu_available() and self.tpu
    @property
    def device_idx( self ) -> int:
        requires_backends(self , ['''torch'''] )
        # TODO(PVP): currently only single GPU is supported
        return torch.cuda.current_device()
    @property
    def device( self ) -> "torch.device":
        requires_backends(self , ['''torch'''] )
        return self._setup_devices[0]
    @property
    def n_gpu( self ):
        requires_backends(self , ['''torch'''] )
        return self._setup_devices[1]
    @property
    def is_gpu( self ):
        return self.n_gpu > 0
| 127 | 1 |
from __future__ import annotations
from itertools import permutations
from random import randint
from timeit import repeat
def make_dataset() -> tuple[list[int], int]:
    arr = [randint(-10_00 , 10_00 ) for i in range(10 )]
    r = randint(-50_00 , 50_00 )
    return (arr, r)
dataset = make_dataset()
def triplet_sum1(arr: list[int] , target: int ) -> tuple[int, ...]:
    for triplet in permutations(arr , 3 ):
        if sum(triplet ) == target:
            return tuple(sorted(triplet ) )
    return (0, 0, 0)
def triplet_sum2(arr: list[int] , target: int ) -> tuple[int, int, int]:
    arr.sort()
    n = len(arr )
    for i in range(n - 1 ):
        left , right = i + 1, n - 1
        while left < right:
            if arr[i] + arr[left] + arr[right] == target:
                return (arr[i], arr[left], arr[right])
            elif arr[i] + arr[left] + arr[right] < target:
                left += 1
            elif arr[i] + arr[left] + arr[right] > target:
                right -= 1
    return (0, 0, 0)
def solution_times() -> tuple[float, float]:
    setup_code = '''
from __main__ import dataset, triplet_sum1, triplet_sum2
'''
    test_code1 = '''
triplet_sum1(*dataset)
'''
    test_code2 = '''
triplet_sum2(*dataset)
'''
    times1 = repeat(setup=setup_code , stmt=test_code1 , repeat=5 , number=1_00_00 )
    times2 = repeat(setup=setup_code , stmt=test_code2 , repeat=5 , number=1_00_00 )
    return (min(times1 ), min(times2 ))
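

# Quick sanity check (assumed examples, not from the original file): the O(n^3)
# permutation scan and the O(n^2) two-pointer scan agree on hand-made instances.
assert triplet_sum1([13, 29, 7, 23, 5], 35) == (5, 7, 23)
assert triplet_sum2([37, 9, 19, 50, 44], 65) == (9, 19, 37)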
if __name__ == "__main__":
from doctest import testmod
testmod()
    times = solution_times()
print(F"The time for naive implementation is {times[0]}.")
print(F"The time for optimized implementation is {times[1]}.")
| 351 |
# Algorithm for the pigeonhole sorting
def pigeonhole_sort( a: list )->None:
    min_val = min(a ) # min() finds the minimum value
    max_val = max(a ) # max() finds the maximum value
    size = max_val - min_val + 1 # size is difference of max and min values plus one
    # list of pigeonholes of size equal to the variable size
    holes = [0] * size
    # Populate the pigeonholes.
    for x in a:
        assert isinstance(x , int ), "integers only please"
        holes[x - min_val] += 1
    # Putting the elements back into the array in an order.
    i = 0
    for count in range(size ):
        while holes[count] > 0:
            holes[count] -= 1
            a[i] = count + min_val
            i += 1
def main()->None:
    a = [8, 3, 2, 7, 4, 6, 8]
    pigeonhole_sort(a )
    print('''Sorted order is:''' , ''' '''.join(str(x ) for x in a ) )
if __name__ == "__main__":
main()
| 39 | 0 |
'''simple docstring'''
from __future__ import annotations
def solve_maze( maze: list[list[int]] ) -> bool:
    '''simple docstring'''
    size = len(maze )
    # We need to create solution object to save path.
    solutions = [[0 for _ in range(size )] for _ in range(size )]
    solved = run_maze(maze , 0 , 0 , solutions )
    if solved:
        print("\n".join(str(row ) for row in solutions ) )
    else:
        print("No solution exists!" )
    return solved
def run_maze( maze: list[list[int]] , i: int , j: int , solutions: list[list[int]] ) -> bool:
    '''simple docstring'''
    size = len(maze )
    # Final check point.
    if i == j == (size - 1):
        solutions[i][j] = 1
        return True
    lower_flag = (not i < 0) and (not j < 0) # Check lower bounds
    upper_flag = (i < size) and (j < size) # Check upper bounds
    if lower_flag and upper_flag:
        # check for already visited and block points.
        block_flag = (not solutions[i][j]) and (not maze[i][j])
        if block_flag:
            # check visited
            solutions[i][j] = 1
            # check for directions
            if (
                run_maze(maze , i + 1 , j , solutions )
                or run_maze(maze , i , j + 1 , solutions )
                or run_maze(maze , i - 1 , j , solutions )
                or run_maze(maze , i , j - 1 , solutions )
            ):
                return True
            solutions[i][j] = 0
            return False
    return False
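

# Example (assumed input; 0 = open cell, 1 = wall). A 3x3 maze with a clear corridor
# is solvable from the top-left to the bottom-right corner:
#
#   maze = [[0, 1, 0],
#           [0, 0, 0],
#           [1, 0, 0]]
#   solve_maze(maze)  # prints the 0/1 path matrix and returns True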
if __name__ == "__main__":
import doctest
doctest.testmod()
| 80 |
'''simple docstring'''
from __future__ import annotations
import json
import requests
from bsa import BeautifulSoup
from fake_useragent import UserAgent
headers = {'UserAgent': UserAgent().random}
def extract_user_profile( script ) -> dict:
    '''simple docstring'''
    data = script.contents[0]
    info = json.loads(data[data.find("{\"config\"" ) : -1] )
    return info["entry_data"]["ProfilePage"][0]["graphql"]["user"]
class InstagramUser :
    def __init__( self , username ):
        self.url = f'''https://www.instagram.com/{username}/'''
        self.user_data = self.get_json()
    def get_json( self ):
        html = requests.get(self.url , headers=headers ).text
        scripts = BeautifulSoup(html , "html.parser" ).find_all("script" )
        try:
            return extract_user_profile(scripts[4] )
        except (json.decoder.JSONDecodeError, KeyError):
            return extract_user_profile(scripts[3] )
    def __repr__( self ):
        return f'''{self.__class__.__name__}(\'{self.username}\')'''
    def __str__( self ):
        return f'''{self.fullname} ({self.username}) is {self.biography}'''
    @property
    def username( self ):
        return self.user_data["username"]
    @property
    def fullname( self ):
        return self.user_data["full_name"]
    @property
    def biography( self ):
        return self.user_data["biography"]
    @property
    def email( self ):
        return self.user_data["business_email"]
    @property
    def website( self ):
        return self.user_data["external_url"]
    @property
    def number_of_followers( self ):
        return self.user_data["edge_followed_by"]["count"]
    @property
    def number_of_followings( self ):
        return self.user_data["edge_follow"]["count"]
    @property
    def number_of_posts( self ):
        return self.user_data["edge_owner_to_timeline_media"]["count"]
    @property
    def profile_picture_url( self ):
        return self.user_data["profile_pic_url_hd"]
    @property
    def is_verified( self ):
        return self.user_data["is_verified"]
    @property
    def is_private( self ):
        return self.user_data["is_private"]
def test_instagram( username: str = "github" ) -> None:
    '''simple docstring'''
    import os
    if os.environ.get("CI" ):
        return # test failing on GitHub Actions
    instagram_user = InstagramUser(username )
    assert instagram_user.user_data
    assert isinstance(instagram_user.user_data , dict )
assert instagram_user.username == username
if username != "github":
return
assert instagram_user.fullname == "GitHub"
assert instagram_user.biography == "Built for developers."
assert instagram_user.number_of_posts > 150
assert instagram_user.number_of_followers > 120000
assert instagram_user.number_of_followings > 15
assert instagram_user.email == "[email protected]"
assert instagram_user.website == "https://github.com/readme"
assert instagram_user.profile_picture_url.startswith("https://instagram." )
assert instagram_user.is_verified is True
assert instagram_user.is_private is False
if __name__ == "__main__":
import doctest
doctest.testmod()
a__ : Any = InstagramUser('github')
print(instagram_user)
print(F"""{instagram_user.number_of_posts = }""")
print(F"""{instagram_user.number_of_followers = }""")
print(F"""{instagram_user.number_of_followings = }""")
print(F"""{instagram_user.email = }""")
print(F"""{instagram_user.website = }""")
print(F"""{instagram_user.profile_picture_url = }""")
print(F"""{instagram_user.is_verified = }""")
print(F"""{instagram_user.is_private = }""")
| 80 | 1 |
"""Compute the binomial distribution for a given number of Bernoulli trials."""
from math import factorial


def binomial_distribution(successes: int, trials: int, prob: float) -> float:
    """Return the probability of exactly `successes` successes in `trials` trials."""
    if successes > trials:
        raise ValueError("successes must be lower or equal to trials")
    if trials < 0 or successes < 0:
        raise ValueError("the function is defined for non-negative integers")
    if not isinstance(successes, int) or not isinstance(trials, int):
        raise ValueError("the function is defined for non-negative integers")
    if not 0 < prob < 1:
        raise ValueError("prob has to be in range of 1 - 0")
    probability = (prob**successes) * ((1 - prob) ** (trials - successes))
    # Calculate the binomial coefficient: n! / (k! * (n - k)!)
    coefficient = float(factorial(trials))
    coefficient /= factorial(successes) * factorial(trials - successes)
    return probability * coefficient


if __name__ == "__main__":
    from doctest import testmod

    testmod()
    print("Probability of 2 successes out of 4 trials")
    print("with probability of 0.75 is:", end=" ")
    print(binomial_distribution(2, 4, 0.75))
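
    # Illustrative sanity check: the PMF summed over all outcomes k = 0..trials
    # must be very close to 1.
    total = sum(binomial_distribution(k, 4, 0.75) for k in range(5))
    print(f"Sum of P(k) for k = 0..4: {total:.6f}")  # ~1.0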
| 358 |
"""Implementation of a logic OR gate."""


def or_gate(input_1: int, input_2: int) -> int:
    """Calculate the OR of two binary input values."""
    return int((input_1, input_2).count(1) != 0)


def test_or_gate() -> None:
    """Test the or_gate truth table."""
    assert or_gate(0, 0) == 0
    assert or_gate(0, 1) == 1
    assert or_gate(1, 0) == 1
    assert or_gate(1, 1) == 1


if __name__ == "__main__":
    print(or_gate(0, 1))
    print(or_gate(1, 0))
    print(or_gate(0, 0))
    print(or_gate(1, 1))
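
    # Illustrative composition: a 3-input OR built from two 2-input gates.
    print(or_gate(or_gate(1, 0), 0))  # 1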
| 164 | 0 |
import re
from pathlib import Path
from unittest import TestCase

import pytest


@pytest.mark.integration
class TestDatasetScripts(TestCase):
    def _no_encoding_on_file_open(self, filepath: str):
        """Return the first non-binary open(...) call that lacks an explicit encoding."""
        with open(filepath, encoding="utf-8") as input_file:
            regexp = re.compile(r"(?!.*\b(?:encoding|rb|w|wb|w+|wb+|ab|ab+)\b)(?<=\s)(open)\((.*)\)")
            input_text = input_file.read()
            match = regexp.search(input_text)
        return match

    def _no_print_statements(self, filepath: str):
        """Return the first bare print(...) call, ignoring comments and docstrings."""
        with open(filepath, encoding="utf-8") as input_file:
            regexp = re.compile(r'#[^\r\n]*print\(|\"[^\r\n]*print\(|\"\"\".*?print\(.*?\"\"\"|(print\()', re.DOTALL)
            input_text = input_file.read()
            # use `re.finditer` to handle the case where the ignored groups would be matched first by `re.search`
            matches = regexp.finditer(input_text)
            matches = [match for match in matches if match is not None and match.group(1) is not None]
        return matches[0] if matches else None

    def test_no_encoding_on_file_open(self):
        dataset_paths = Path("./datasets")
        dataset_files = list(dataset_paths.absolute().glob("**/*.py"))
        for dataset in dataset_files:
            if self._no_encoding_on_file_open(str(dataset)):
                raise AssertionError(f"open(...) must use utf-8 encoding in {dataset}")

    def test_no_print_statements(self):
        dataset_paths = Path("./datasets")
        dataset_files = list(dataset_paths.absolute().glob("**/*.py"))
        for dataset in dataset_files:
            if self._no_print_statements(str(dataset)):
                raise AssertionError(f"print statement found in {dataset}. Use datasets.logger/logging instead.")
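

# Illustrative examples of lines the encoding check above would flag
# (hypothetical file contents, not part of the test suite):
#
#     open("data.txt")                    # flagged: no encoding keyword
#     open("data.txt", encoding="utf-8")  # passes
#     open("data.bin", "rb")              # passes: binary mode is exempt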
| 343 |
"""simple docstring"""
import copy
import inspect
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import VideoMAEConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING,
VideoMAEForPreTraining,
VideoMAEForVideoClassification,
VideoMAEModel,
)
from transformers.models.videomae.modeling_videomae import VIDEOMAE_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from transformers import VideoMAEImageProcessor
class __SCREAMING_SNAKE_CASE :
'''simple docstring'''
def __init__( self : Optional[Any], lowerCamelCase : Any, lowerCamelCase : List[Any]=13, lowerCamelCase : Any=10, lowerCamelCase : Optional[Any]=3, lowerCamelCase : Union[str, Any]=2, lowerCamelCase : Dict=2, lowerCamelCase : Tuple=2, lowerCamelCase : List[str]=True, lowerCamelCase : Optional[int]=True, lowerCamelCase : Dict=32, lowerCamelCase : Any=5, lowerCamelCase : Dict=4, lowerCamelCase : Any=37, lowerCamelCase : Union[str, Any]="gelu", lowerCamelCase : Dict=0.1, lowerCamelCase : Union[str, Any]=0.1, lowerCamelCase : Dict=10, lowerCamelCase : str=0.02, lowerCamelCase : List[Any]=0.9, lowerCamelCase : List[Any]=None, )-> str:
lowerCamelCase__ : List[str] =parent
lowerCamelCase__ : Any =batch_size
lowerCamelCase__ : str =image_size
lowerCamelCase__ : Optional[Any] =num_channels
lowerCamelCase__ : Optional[int] =patch_size
lowerCamelCase__ : List[str] =tubelet_size
lowerCamelCase__ : Optional[Any] =num_frames
lowerCamelCase__ : Any =is_training
lowerCamelCase__ : List[Any] =use_labels
lowerCamelCase__ : Union[str, Any] =hidden_size
lowerCamelCase__ : List[str] =num_hidden_layers
lowerCamelCase__ : str =num_attention_heads
lowerCamelCase__ : List[Any] =intermediate_size
lowerCamelCase__ : Any =hidden_act
lowerCamelCase__ : int =hidden_dropout_prob
lowerCamelCase__ : Optional[int] =attention_probs_dropout_prob
lowerCamelCase__ : Optional[Any] =type_sequence_label_size
lowerCamelCase__ : int =initializer_range
lowerCamelCase__ : Optional[Any] =mask_ratio
lowerCamelCase__ : Any =scope
# in VideoMAE, the number of tokens equals num_frames/tubelet_size * num_patches per frame
lowerCamelCase__ : Optional[Any] =(image_size // patch_size) ** 2
lowerCamelCase__ : Any =(num_frames // tubelet_size) * self.num_patches_per_frame
# use this variable to define bool_masked_pos
lowerCamelCase__ : List[Any] =int(mask_ratio * self.seq_length )
def snake_case ( self : Dict )-> Union[str, Any]:
lowerCamelCase__ : str =floats_tensor(
[self.batch_size, self.num_frames, self.num_channels, self.image_size, self.image_size] )
lowerCamelCase__ : Any =None
if self.use_labels:
lowerCamelCase__ : Union[str, Any] =ids_tensor([self.batch_size], self.type_sequence_label_size )
lowerCamelCase__ : Optional[Any] =self.get_config()
return config, pixel_values, labels
def snake_case ( self : Union[str, Any] )-> Optional[int]:
return VideoMAEConfig(
image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, num_frames=self.num_frames, tubelet_size=self.tubelet_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=lowerCamelCase, initializer_range=self.initializer_range, )
def snake_case ( self : Dict, lowerCamelCase : Tuple, lowerCamelCase : Optional[Any], lowerCamelCase : Any )-> Union[str, Any]:
lowerCamelCase__ : List[str] =VideoMAEModel(config=lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
lowerCamelCase__ : int =model(lowerCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size) )
def snake_case ( self : Any, lowerCamelCase : str, lowerCamelCase : Optional[int], lowerCamelCase : str )-> Dict:
lowerCamelCase__ : int =VideoMAEForPreTraining(lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
# important: each video needs to have the same number of masked patches
# hence we define a single mask, which we then repeat for each example in the batch
lowerCamelCase__ : Optional[int] =torch.ones((self.num_masks,) )
lowerCamelCase__ : List[str] =torch.cat([mask, torch.zeros(self.seq_length - mask.size(0 ) )] )
lowerCamelCase__ : int =mask.expand(self.batch_size, -1 ).bool()
lowerCamelCase__ : Any =model(lowerCamelCase, lowerCamelCase )
# model only returns predictions for masked patches
lowerCamelCase__ : Optional[int] =mask.sum().item()
lowerCamelCase__ : Dict =3 * self.tubelet_size * self.patch_size**2
self.parent.assertEqual(result.logits.shape, (self.batch_size, num_masked_patches, decoder_num_labels) )
def snake_case ( self : Optional[Any] )-> Tuple:
lowerCamelCase__ : Tuple =self.prepare_config_and_inputs()
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : Dict =config_and_inputs
lowerCamelCase__ : List[str] ={'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class __SCREAMING_SNAKE_CASE ( lowerCAmelCase_ , lowerCAmelCase_ , unittest.TestCase ):
'''simple docstring'''
_a = (
(VideoMAEModel, VideoMAEForPreTraining, VideoMAEForVideoClassification) if is_torch_available() else ()
)
_a = (
{'feature-extraction': VideoMAEModel, 'video-classification': VideoMAEForVideoClassification}
if is_torch_available()
else {}
)
_a = False
_a = False
_a = False
_a = False
def snake_case ( self : List[Any] )-> Tuple:
lowerCamelCase__ : int =VideoMAEModelTester(self )
lowerCamelCase__ : Optional[int] =ConfigTester(self, config_class=lowerCamelCase, has_text_modality=lowerCamelCase, hidden_size=37 )
def snake_case ( self : Any, lowerCamelCase : List[Any], lowerCamelCase : Optional[Any], lowerCamelCase : List[str]=False )-> Tuple:
lowerCamelCase__ : str =copy.deepcopy(lowerCamelCase )
if model_class == VideoMAEForPreTraining:
# important: each video needs to have the same number of masked patches
# hence we define a single mask, which we then repeat for each example in the batch
lowerCamelCase__ : Any =torch.ones((self.model_tester.num_masks,) )
lowerCamelCase__ : Dict =torch.cat([mask, torch.zeros(self.model_tester.seq_length - mask.size(0 ) )] )
lowerCamelCase__ : Optional[int] =mask.expand(self.model_tester.batch_size, -1 ).bool()
lowerCamelCase__ : int =bool_masked_pos.to(lowerCamelCase )
if return_labels:
if model_class in [
*get_values(lowerCamelCase ),
]:
lowerCamelCase__ : List[str] =torch.zeros(
self.model_tester.batch_size, dtype=torch.long, device=lowerCamelCase )
return inputs_dict
def snake_case ( self : List[Any] )-> int:
self.config_tester.run_common_tests()
@unittest.skip(reason='''VideoMAE does not use inputs_embeds''' )
def snake_case ( self : List[str] )-> Tuple:
pass
def snake_case ( self : Union[str, Any] )-> Union[str, Any]:
lowerCamelCase__ , lowerCamelCase__ : Optional[int] =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase__ : List[str] =model_class(lowerCamelCase )
self.assertIsInstance(model.get_input_embeddings(), (nn.Module) )
lowerCamelCase__ : Optional[Any] =model.get_output_embeddings()
self.assertTrue(x is None or isinstance(lowerCamelCase, nn.Linear ) )
def snake_case ( self : Optional[int] )-> Optional[Any]:
lowerCamelCase__ , lowerCamelCase__ : Any =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase__ : Optional[int] =model_class(lowerCamelCase )
lowerCamelCase__ : Dict =inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCamelCase__ : Tuple =[*signature.parameters.keys()]
lowerCamelCase__ : List[str] =['''pixel_values''']
self.assertListEqual(arg_names[:1], lowerCamelCase )
def snake_case ( self : Tuple )-> Optional[int]:
lowerCamelCase__ : Any =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCamelCase )
def snake_case ( self : List[Any] )-> Union[str, Any]:
lowerCamelCase__ : Optional[int] =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*lowerCamelCase )
@slow
def snake_case ( self : List[Any] )-> Dict:
for model_name in VIDEOMAE_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase__ : str =VideoMAEModel.from_pretrained(lowerCamelCase )
self.assertIsNotNone(lowerCamelCase )
def snake_case ( self : List[str] )-> Optional[int]:
if not self.has_attentions:
pass
else:
lowerCamelCase__ , lowerCamelCase__ : Union[str, Any] =self.model_tester.prepare_config_and_inputs_for_common()
lowerCamelCase__ : Tuple =True
for model_class in self.all_model_classes:
lowerCamelCase__ : Any =self.model_tester.seq_length - self.model_tester.num_masks
lowerCamelCase__ : Any =(
num_visible_patches if model_class == VideoMAEForPreTraining else self.model_tester.seq_length
)
lowerCamelCase__ : Optional[int] =True
lowerCamelCase__ : Optional[int] =False
lowerCamelCase__ : Optional[int] =True
lowerCamelCase__ : int =model_class(lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
with torch.no_grad():
lowerCamelCase__ : Union[str, Any] =model(**self._prepare_for_class(lowerCamelCase, lowerCamelCase ) )
lowerCamelCase__ : str =outputs.attentions
self.assertEqual(len(lowerCamelCase ), self.model_tester.num_hidden_layers )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
lowerCamelCase__ : Tuple =True
lowerCamelCase__ : Union[str, Any] =model_class(lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
with torch.no_grad():
lowerCamelCase__ : List[str] =model(**self._prepare_for_class(lowerCamelCase, lowerCamelCase ) )
lowerCamelCase__ : int =outputs.attentions
self.assertEqual(len(lowerCamelCase ), self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ), [self.model_tester.num_attention_heads, seq_len, seq_len], )
lowerCamelCase__ : Union[str, Any] =len(lowerCamelCase )
# Check attention is always last and order is fine
lowerCamelCase__ : List[Any] =True
lowerCamelCase__ : Union[str, Any] =True
lowerCamelCase__ : Dict =model_class(lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
with torch.no_grad():
lowerCamelCase__ : Any =model(**self._prepare_for_class(lowerCamelCase, lowerCamelCase ) )
self.assertEqual(out_len + 1, len(lowerCamelCase ) )
lowerCamelCase__ : Optional[Any] =outputs.attentions
self.assertEqual(len(lowerCamelCase ), self.model_tester.num_hidden_layers )
self.assertListEqual(
list(self_attentions[0].shape[-3:] ), [self.model_tester.num_attention_heads, seq_len, seq_len], )
def snake_case ( self : str )-> int:
def check_hidden_states_output(lowerCamelCase : Optional[Any], lowerCamelCase : List[str], lowerCamelCase : Optional[Any] ):
lowerCamelCase__ : List[Any] =model_class(lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
with torch.no_grad():
lowerCamelCase__ : Optional[Any] =model(**self._prepare_for_class(lowerCamelCase, lowerCamelCase ) )
lowerCamelCase__ : Dict =outputs.hidden_states
lowerCamelCase__ : Any =self.model_tester.num_hidden_layers + 1
self.assertEqual(len(lowerCamelCase ), lowerCamelCase )
lowerCamelCase__ : Any =self.model_tester.seq_length - self.model_tester.num_masks
lowerCamelCase__ : str =num_visible_patches if model_class == VideoMAEForPreTraining else self.model_tester.seq_length
self.assertListEqual(
list(hidden_states[0].shape[-2:] ), [seq_length, self.model_tester.hidden_size], )
lowerCamelCase__ , lowerCamelCase__ : List[str] =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase__ : Union[str, Any] =True
check_hidden_states_output(lowerCamelCase, lowerCamelCase, lowerCamelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowerCamelCase__ : int =True
check_hidden_states_output(lowerCamelCase, lowerCamelCase, lowerCamelCase )
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def snake_case ( self : Optional[int] )-> int:
pass
def snake_case__ ( ):
"""simple docstring"""
lowerCamelCase__ : int =hf_hub_download(
repo_id='''hf-internal-testing/spaghetti-video''' , filename='''eating_spaghetti.npy''' , repo_type='''dataset''' )
lowerCamelCase__ : str =np.load(__lowerCamelCase )
return list(__lowerCamelCase )
@require_torch
@require_vision
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def snake_case ( self : List[str] )-> List[Any]:
# logits were tested with a different mean and std, so we use the same here
return (
VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5] )
if is_vision_available()
else None
)
@slow
def snake_case ( self : Optional[Any] )-> Dict:
lowerCamelCase__ : str =VideoMAEForVideoClassification.from_pretrained('''MCG-NJU/videomae-base-finetuned-kinetics''' ).to(
lowerCamelCase )
lowerCamelCase__ : Optional[Any] =self.default_image_processor
lowerCamelCase__ : List[str] =prepare_video()
lowerCamelCase__ : Union[str, Any] =image_processor(lowerCamelCase, return_tensors='''pt''' ).to(lowerCamelCase )
# forward pass
with torch.no_grad():
lowerCamelCase__ : Tuple =model(**lowerCamelCase )
# verify the logits
lowerCamelCase__ : Union[str, Any] =torch.Size((1, 400) )
self.assertEqual(outputs.logits.shape, lowerCamelCase )
lowerCamelCase__ : Tuple =torch.tensor([0.3_669, -0.0_688, -0.2_421] ).to(lowerCamelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3], lowerCamelCase, atol=1E-4 ) )
@slow
def snake_case ( self : Any )-> Tuple:
lowerCamelCase__ : Tuple =VideoMAEForPreTraining.from_pretrained('''MCG-NJU/videomae-base-short''' ).to(lowerCamelCase )
lowerCamelCase__ : Optional[int] =self.default_image_processor
lowerCamelCase__ : Dict =prepare_video()
lowerCamelCase__ : Dict =image_processor(lowerCamelCase, return_tensors='''pt''' ).to(lowerCamelCase )
# add boolean mask, indicating which patches to mask
lowerCamelCase__ : str =hf_hub_download(repo_id='''hf-internal-testing/bool-masked-pos''', filename='''bool_masked_pos.pt''' )
lowerCamelCase__ : Dict =torch.load(lowerCamelCase )
# forward pass
with torch.no_grad():
lowerCamelCase__ : Union[str, Any] =model(**lowerCamelCase )
# verify the logits
lowerCamelCase__ : Dict =torch.Size([1, 1408, 1536] )
lowerCamelCase__ : Union[str, Any] =torch.tensor(
[[0.7_994, 0.9_612, 0.8_508], [0.7_401, 0.8_958, 0.8_302], [0.5_862, 0.7_468, 0.7_325]], device=lowerCamelCase )
self.assertEqual(outputs.logits.shape, lowerCamelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3], lowerCamelCase, atol=1E-4 ) )
# verify the loss (`config.norm_pix_loss` = `True`)
lowerCamelCase__ : Optional[int] =torch.tensor([0.5_142], device=lowerCamelCase )
self.assertTrue(torch.allclose(outputs.loss, lowerCamelCase, atol=1E-4 ) )
# verify the loss (`config.norm_pix_loss` = `False`)
lowerCamelCase__ : Union[str, Any] =VideoMAEForPreTraining.from_pretrained('''MCG-NJU/videomae-base-short''', norm_pix_loss=lowerCamelCase ).to(
lowerCamelCase )
with torch.no_grad():
lowerCamelCase__ : Union[str, Any] =model(**lowerCamelCase )
lowerCamelCase__ : Union[str, Any] =torch.tensor(torch.tensor([0.6_469] ), device=lowerCamelCase )
self.assertTrue(torch.allclose(outputs.loss, lowerCamelCase, atol=1E-4 ) )
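

# Illustrative sketch of how a boolean mask for VideoMAEForPreTraining can be
# built by hand (seq_length and mask_ratio are assumed values; for
# videomae-base, 16 frames / tubelet 2 * 14 * 14 patches = 1568 tokens):
#
#     seq_length, mask_ratio = 1568, 0.9
#     num_masks = int(mask_ratio * seq_length)
#     mask = torch.cat([torch.ones(num_masks), torch.zeros(seq_length - num_masks)])
#     bool_masked_pos = mask.expand(1, -1).bool()  # shape (batch_size, seq_length)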
| 238 | 0 |
from __future__ import annotations

import unittest

from transformers import XGLMConfig, XGLMTokenizer, is_tf_available
from transformers.testing_utils import require_tf, slow

from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin


if is_tf_available():
    import tensorflow as tf

    from transformers.models.xglm.modeling_tf_xglm import (
        TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
        TFXGLMForCausalLM,
        TFXGLMModel,
    )


@require_tf
class TFXGLMModelTester:
    config_cls = XGLMConfig
    config_updates = {}
    hidden_act = "gelu"

    def __init__(
        self,
        parent,
        batch_size=14,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_labels=True,
        vocab_size=99,
        d_model=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        ffn_dim=37,
        activation_function="gelu",
        activation_dropout=0.1,
        attention_dropout=0.1,
        max_position_embeddings=512,
        initializer_range=0.02,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = d_model
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.ffn_dim = ffn_dim
        self.activation_function = activation_function
        self.activation_dropout = activation_dropout
        self.attention_dropout = attention_dropout
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.scope = None
        self.bos_token_id = 0
        self.eos_token_id = 2
        self.pad_token_id = 1

    def get_large_model_config(self):
        return XGLMConfig.from_pretrained("facebook/xglm-564M")

    def prepare_config_and_inputs(self):
        input_ids = tf.clip_by_value(
            ids_tensor([self.batch_size, self.seq_length], self.vocab_size), clip_value_min=0, clip_value_max=3
        )

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        config = self.get_config()

        head_mask = floats_tensor([self.num_hidden_layers, self.num_attention_heads], 2)

        return (
            config,
            input_ids,
            input_mask,
            head_mask,
        )

    def get_config(self):
        return XGLMConfig(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            num_layers=self.num_hidden_layers,
            attention_heads=self.num_attention_heads,
            ffn_dim=self.ffn_dim,
            activation_function=self.activation_function,
            activation_dropout=self.activation_dropout,
            attention_dropout=self.attention_dropout,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
            use_cache=True,
            bos_token_id=self.bos_token_id,
            eos_token_id=self.eos_token_id,
            pad_token_id=self.pad_token_id,
            return_dict=True,
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()

        (
            config,
            input_ids,
            input_mask,
            head_mask,
        ) = config_and_inputs

        inputs_dict = {
            "input_ids": input_ids,
            "head_mask": head_mask,
        }

        return config, inputs_dict


@require_tf
class TFXGLMModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFXGLMModel, TFXGLMForCausalLM) if is_tf_available() else ()
    all_generative_model_classes = (TFXGLMForCausalLM,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TFXGLMModel, "text-generation": TFXGLMForCausalLM} if is_tf_available() else {}
    )
    test_onnx = False
    test_missing_keys = False
    test_pruning = False

    def setUp(self):
        self.model_tester = TFXGLMModelTester(self)
        self.config_tester = ConfigTester(self, config_class=XGLMConfig, n_embd=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFXGLMModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @unittest.skip(reason="Currently, model embeddings are going to undergo a major refactor.")
    def test_resize_token_embeddings(self):
        super().test_resize_token_embeddings()


@require_tf
class TFXGLMModelLanguageGenerationTest(unittest.TestCase):
    @slow
    def test_lm_generate_xglm(self, verify_outputs=True):
        model = TFXGLMForCausalLM.from_pretrained("facebook/xglm-564M")
        input_ids = tf.convert_to_tensor([[2, 268, 9865]], dtype=tf.int32)  # The dog
        # </s> The dog is a very friendly dog. He is very affectionate and loves to play with other
        # fmt: off
        expected_output_ids = [2, 268, 9865, 67, 11, 1988, 57252, 9865, 5, 984, 67, 1988, 213838, 1658, 53, 70446, 33, 6657, 278, 1581]
        # fmt: on
        output_ids = model.generate(input_ids, do_sample=False, num_beams=1)
        if verify_outputs:
            self.assertListEqual(output_ids[0].numpy().tolist(), expected_output_ids)

    @slow
    def test_xglm_sample(self):
        tokenizer = XGLMTokenizer.from_pretrained("facebook/xglm-564M")
        model = TFXGLMForCausalLM.from_pretrained("facebook/xglm-564M")

        tf.random.set_seed(0)
        tokenized = tokenizer("Today is a nice day and", return_tensors="tf")
        input_ids = tokenized.input_ids
        # forces the generation to happen on CPU, to avoid GPU-related quirks (and assure same output regardless of the available devices)
        with tf.device(":/CPU:0"):
            output_ids = model.generate(input_ids, do_sample=True, seed=[7, 0])
        output_str = tokenizer.decode(output_ids[0], skip_special_tokens=True)

        EXPECTED_OUTPUT_STR = (
            "Today is a nice day and warm evening here over Southern Alberta!! Today when they closed schools due"
        )
        self.assertEqual(output_str, EXPECTED_OUTPUT_STR)

    @slow
    def test_batch_generation(self):
        model = TFXGLMForCausalLM.from_pretrained("facebook/xglm-564M")
        tokenizer = XGLMTokenizer.from_pretrained("facebook/xglm-564M")

        tokenizer.padding_side = "left"

        # use different length sentences to test batching
        sentences = [
            "This is an extremelly long sentence that only exists to test the ability of the model to cope with "
            "left-padding, such as in batched generation. The output for the sequence below should be the same "
            "regardless of whether left padding is applied or not. When",
            "Hello, my dog is a little",
        ]

        inputs = tokenizer(sentences, return_tensors="tf", padding=True)
        input_ids = inputs["input_ids"]

        outputs = model.generate(input_ids=input_ids, attention_mask=inputs["attention_mask"], max_new_tokens=12)

        inputs_non_padded = tokenizer(sentences[0], return_tensors="tf").input_ids
        output_non_padded = model.generate(input_ids=inputs_non_padded, max_new_tokens=12)

        inputs_padded = tokenizer(sentences[1], return_tensors="tf").input_ids
        output_padded = model.generate(input_ids=inputs_padded, max_new_tokens=12)

        batch_out_sentence = tokenizer.batch_decode(outputs, skip_special_tokens=True)
        non_padded_sentence = tokenizer.decode(output_non_padded[0], skip_special_tokens=True)
        padded_sentence = tokenizer.decode(output_padded[0], skip_special_tokens=True)

        expected_output_sentence = [
            "This is an extremelly long sentence that only exists to test the ability of the model to cope with "
            "left-padding, such as in batched generation. The output for the sequence below should be the same "
            "regardless of whether left padding is applied or not. When left padding is applied, the sequence will be "
            "a single",
            "Hello, my dog is a little bit of a shy one, but he is very friendly",
        ]
        self.assertListEqual(expected_output_sentence, batch_out_sentence)
        self.assertListEqual(expected_output_sentence, [non_padded_sentence, padded_sentence])
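

# Note on the batching test above: decoder-only models are padded on the left
# so that the last position of every prompt holds a real token; with right
# padding the model would condition its next-token prediction on pad tokens.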
| 352 |
from typing import TYPE_CHECKING

from ...utils import _LazyModule


_import_structure = {"tokenization_bertweet": ["BertweetTokenizer"]}


if TYPE_CHECKING:
    from .tokenization_bertweet import BertweetTokenizer

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 290 | 0 |
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
    is_vision_available,
)


_import_structure = {
    "configuration_clip": [
        "CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "CLIPConfig",
        "CLIPOnnxConfig",
        "CLIPTextConfig",
        "CLIPVisionConfig",
    ],
    "processing_clip": ["CLIPProcessor"],
    "tokenization_clip": ["CLIPTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_clip_fast"] = ["CLIPTokenizerFast"]

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_clip"] = ["CLIPFeatureExtractor"]
    _import_structure["image_processing_clip"] = ["CLIPImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_clip"] = [
        "CLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "CLIPModel",
        "CLIPPreTrainedModel",
        "CLIPTextModel",
        "CLIPTextModelWithProjection",
        "CLIPVisionModel",
        "CLIPVisionModelWithProjection",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_clip"] = [
        "TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFCLIPModel",
        "TFCLIPPreTrainedModel",
        "TFCLIPTextModel",
        "TFCLIPVisionModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_clip"] = [
        "FlaxCLIPModel",
        "FlaxCLIPPreTrainedModel",
        "FlaxCLIPTextModel",
        "FlaxCLIPTextPreTrainedModel",
        "FlaxCLIPVisionModel",
        "FlaxCLIPVisionPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_clip import (
        CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
        CLIPConfig,
        CLIPOnnxConfig,
        CLIPTextConfig,
        CLIPVisionConfig,
    )
    from .processing_clip import CLIPProcessor
    from .tokenization_clip import CLIPTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_clip_fast import CLIPTokenizerFast

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_clip import CLIPFeatureExtractor
        from .image_processing_clip import CLIPImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_clip import (
            CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
            CLIPModel,
            CLIPPreTrainedModel,
            CLIPTextModel,
            CLIPTextModelWithProjection,
            CLIPVisionModel,
            CLIPVisionModelWithProjection,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_clip import (
            TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFCLIPModel,
            TFCLIPPreTrainedModel,
            TFCLIPTextModel,
            TFCLIPVisionModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_clip import (
            FlaxCLIPModel,
            FlaxCLIPPreTrainedModel,
            FlaxCLIPTextModel,
            FlaxCLIPTextPreTrainedModel,
            FlaxCLIPVisionModel,
            FlaxCLIPVisionPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
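
# Note: with this lazy-module pattern, importing the package only registers
# `_import_structure`; the heavy torch/TF/flax submodules are imported the
# first time one of their public names is actually accessed.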
| 38 |
import os
import unittest

from transformers.models.bartpho.tokenization_bartpho import VOCAB_FILES_NAMES, BartphoTokenizer
from transformers.testing_utils import get_tests_dir

from ...test_tokenization_common import TokenizerTesterMixin


SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece_bpe.model")


class BartphoTokenizerTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BartphoTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        vocab = ["▁This", "▁is", "▁a", "▁t", "est"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.monolingual_vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["monolingual_vocab_file"])
        with open(self.monolingual_vocab_file, "w", encoding="utf-8") as fp:
            for token in vocab_tokens:
                fp.write(f"{token} {vocab_tokens[token]}\n")

        tokenizer = BartphoTokenizer(SAMPLE_VOCAB, self.monolingual_vocab_file, **self.special_tokens_map)
        tokenizer.save_pretrained(self.tmpdirname)

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return BartphoTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "This is a là test"
        output_text = "This is a<unk><unk> test"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = BartphoTokenizer(SAMPLE_VOCAB, self.monolingual_vocab_file, **self.special_tokens_map)
        text = "This is a là test"
        bpe_tokens = "▁This ▁is ▁a ▁l à ▁t est".split()
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)
        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [4, 5, 6, 3, 3, 7, 8, 3]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
| 36 | 0 |
import gc
import importlib.metadata
import tempfile
import unittest

from packaging import version

from transformers import (
    AutoModel,
    AutoModelForCausalLM,
    AutoModelForSeq2SeqLM,
    AutoModelForSequenceClassification,
    AutoTokenizer,
    BitsAndBytesConfig,
    pipeline,
)
from transformers.testing_utils import (
    is_torch_available,
    require_accelerate,
    require_bitsandbytes,
    require_torch,
    require_torch_gpu,
    require_torch_multi_gpu,
    slow,
)


def get_some_linear_layer(model):
    if model.config.model_type == "gpt2":
        return model.transformer.h[0].mlp.c_fc
    return model.transformer.h[0].mlp.dense_4h_to_h


if is_torch_available():
    import torch
    import torch.nn as nn

    class LoRALayer(nn.Module):
        """Wraps a linear layer with a LoRA-like adapter - used for testing purposes only"""

        def __init__(self, module: nn.Module, rank: int):
            super().__init__()
            self.module = module
            self.adapter = nn.Sequential(
                nn.Linear(module.in_features, rank, bias=False),
                nn.Linear(rank, module.out_features, bias=False),
            )
            small_std = (2.0 / (5 * min(module.in_features, module.out_features))) ** 0.5
            nn.init.normal_(self.adapter[0].weight, std=small_std)
            nn.init.zeros_(self.adapter[1].weight)
            self.adapter.to(module.weight.device)

        def forward(self, input, *args, **kwargs):
            return self.module(input, *args, **kwargs) + self.adapter(input)
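
    # Note: LoRALayer does not freeze anything itself; it adds a low-rank
    # bypass adapter(x) = B(A(x)) next to the wrapped linear layer. With
    # rank r much smaller than min(in_features, out_features), only
    # r * (in_features + out_features) adapter weights need to be trained
    # instead of in_features * out_features.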
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class Base4bitTest(unittest.TestCase):
    # We keep the constants inside the init function and model loading inside setUp function

    # We need to test on relatively large models (aka >1b parameters otherwise the quantization may not work as expected)
    # Therefore here we use only bloom-1b3 to test our module
    model_name = "bigscience/bloom-1b7"

    # Constant values
    EXPECTED_RELATIVE_DIFFERENCE = 2.109659552692574

    input_text = "Hello my name is"
    EXPECTED_OUTPUTS = set()
    EXPECTED_OUTPUTS.add("Hello my name is John and I am a professional photographer. I")
    EXPECTED_OUTPUTS.add("Hello my name is John.\nI am a friend of your father.\n")
    EXPECTED_OUTPUTS.add("Hello my name is John Doe, I am a student at the University")
    MAX_NEW_TOKENS = 10

    def setUp(self):
        # Models and tokenizer
        self.tokenizer = AutoTokenizer.from_pretrained(self.model_name)


class Bnb4BitTest(Base4bitTest):
    def setUp(self):
        super().setUp()

        # Models and tokenizer
        self.model_fp16 = AutoModelForCausalLM.from_pretrained(
            self.model_name, torch_dtype=torch.float16, device_map="auto"
        )
        self.model_4bit = AutoModelForCausalLM.from_pretrained(self.model_name, load_in_4bit=True, device_map="auto")

    def tearDown(self):
        del self.model_fp16
        del self.model_4bit

        gc.collect()
        torch.cuda.empty_cache()

    def test_quantization_config_json_serialization(self):
        config = self.model_4bit.config

        self.assertTrue(hasattr(config, "quantization_config"))

        _ = config.to_dict()
        _ = config.to_diff_dict()

        _ = config.to_json_string()

    def test_memory_footprint(self):
        from bitsandbytes.nn import Params4bit

        mem_fp16 = self.model_fp16.get_memory_footprint()
        mem_4bit = self.model_4bit.get_memory_footprint()

        self.assertAlmostEqual(mem_fp16 / mem_4bit, self.EXPECTED_RELATIVE_DIFFERENCE)
        linear = get_some_linear_layer(self.model_4bit)
        self.assertTrue(linear.weight.__class__ == Params4bit)

    def test_linear_are_4bit(self):
        from transformers import T5PreTrainedModel

        self.model_fp16.get_memory_footprint()
        self.model_4bit.get_memory_footprint()

        for name, module in self.model_4bit.named_modules():
            if isinstance(module, torch.nn.Linear):
                if name not in ["lm_head"] + T5PreTrainedModel._keep_in_fp32_modules:
                    # 4-bit parameters are packed in uint8 variables
                    self.assertTrue(module.weight.dtype == torch.uint8)

    def test_generate_quality(self):
        encoded_input = self.tokenizer(self.input_text, return_tensors="pt")
        output_sequences = self.model_4bit.generate(input_ids=encoded_input["input_ids"].to(0), max_new_tokens=10)

        self.assertIn(self.tokenizer.decode(output_sequences[0], skip_special_tokens=True), self.EXPECTED_OUTPUTS)

    def test_generate_quality_config(self):
        config = BitsAndBytesConfig()
        config.load_in_4bit = True

        model_4bit_from_config = AutoModelForCausalLM.from_pretrained(
            self.model_name, quantization_config=config, device_map="auto"
        )

        encoded_input = self.tokenizer(self.input_text, return_tensors="pt")
        output_sequences = model_4bit_from_config.generate(
            input_ids=encoded_input["input_ids"].to(0), max_new_tokens=10
        )

        self.assertIn(self.tokenizer.decode(output_sequences[0], skip_special_tokens=True), self.EXPECTED_OUTPUTS)

    def test_raise_on_save_pretrained(self):
        with self.assertRaises(NotImplementedError), tempfile.TemporaryDirectory() as tmpdirname:
            self.model_4bit.save_pretrained(tmpdirname)

    def test_raise_if_config_and_load_in_4bit(self):
        quantization_config = BitsAndBytesConfig()

        with self.assertRaises(ValueError):
            _ = AutoModelForCausalLM.from_pretrained(
                self.model_name,
                quantization_config=quantization_config,
                load_in_4bit=True,
                device_map="auto",
                bnb_4bit_quant_type="nf4",
            )

    def test_device_and_dtype_assignment(self):
        with self.assertRaises(ValueError):
            # Tries with `str`
            self.model_4bit.to("cpu")

        with self.assertRaises(ValueError):
            # Tries with a `dtype`
            self.model_4bit.to(torch.float16)

        with self.assertRaises(ValueError):
            # Tries with a `device`
            self.model_4bit.to(torch.device("cuda:0"))

        with self.assertRaises(ValueError):
            # Tries with a `device`
            self.model_4bit.float()

        with self.assertRaises(ValueError):
            # Tries with a `device`
            self.model_4bit.half()

        # Test if we did not break anything
        encoded_input = self.tokenizer(self.input_text, return_tensors="pt")

        self.model_fp16 = self.model_fp16.to(torch.float32)
        _ = self.model_fp16.generate(input_ids=encoded_input["input_ids"].to(0), max_new_tokens=10)

        # Check this does not throw an error
        _ = self.model_fp16.to("cpu")

        # Check this does not throw an error
        _ = self.model_fp16.half()

        # Check this does not throw an error
        _ = self.model_fp16.float()

    def test_fp32_4bit_conversion(self):
        model = AutoModelForSeq2SeqLM.from_pretrained("t5-small", load_in_4bit=True, device_map="auto")
        self.assertTrue(model.decoder.block[0].layer[2].DenseReluDense.wo.weight.dtype == torch.float32)


@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class Bnb4BitT5Test(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        cls.model_name = "t5-small"
        cls.dense_act_model_name = "google/flan-t5-small"  # flan-t5 uses dense-act instead of dense-relu-dense
        cls.tokenizer = AutoTokenizer.from_pretrained(cls.model_name)
        cls.input_text = "Translate in German: Hello, my dog is cute"

    def tearDown(self):
        gc.collect()
        torch.cuda.empty_cache()

    def test_inference_without_keep_in_fp32(self):
        from transformers import T5ForConditionalGeneration

        modules = T5ForConditionalGeneration._keep_in_fp32_modules
        T5ForConditionalGeneration._keep_in_fp32_modules = None

        # test with `t5-small`
        model = T5ForConditionalGeneration.from_pretrained(self.model_name, load_in_4bit=True, device_map="auto")
        encoded_input = self.tokenizer(self.input_text, return_tensors="pt").to(0)
        _ = model.generate(**encoded_input)

        # test with `flan-t5-small`
        model = T5ForConditionalGeneration.from_pretrained(
            self.dense_act_model_name, load_in_4bit=True, device_map="auto"
        )
        encoded_input = self.tokenizer(self.input_text, return_tensors="pt").to(0)
        _ = model.generate(**encoded_input)
        T5ForConditionalGeneration._keep_in_fp32_modules = modules

    def test_inference_with_keep_in_fp32(self):
        import bitsandbytes as bnb

        from transformers import T5ForConditionalGeneration

        # test with `t5-small`
        model = T5ForConditionalGeneration.from_pretrained(self.model_name, load_in_4bit=True, device_map="auto")

        # there was a bug with decoders - this test checks that it is fixed
        self.assertTrue(isinstance(model.decoder.block[0].layer[0].SelfAttention.q, bnb.nn.Linear4bit))

        encoded_input = self.tokenizer(self.input_text, return_tensors="pt").to(0)
        _ = model.generate(**encoded_input)

        # test with `flan-t5-small`
        model = T5ForConditionalGeneration.from_pretrained(
            self.dense_act_model_name, load_in_4bit=True, device_map="auto"
        )
        encoded_input = self.tokenizer(self.input_text, return_tensors="pt").to(0)
        _ = model.generate(**encoded_input)


class Classes4BitModelTest(Base4bitTest):
    def setUp(self):
        super().setUp()
        # model_name
        self.model_name = "bigscience/bloom-560m"
        self.seq_to_seq_name = "t5-small"

        # Different types of model
        self.base_model = AutoModel.from_pretrained(self.model_name, load_in_4bit=True, device_map="auto")
        # Sequence classification model
        self.sequence_model = AutoModelForSequenceClassification.from_pretrained(
            self.model_name, load_in_4bit=True, device_map="auto"
        )
        # CausalLM model
        self.model_4bit = AutoModelForCausalLM.from_pretrained(self.model_name, load_in_4bit=True, device_map="auto")
        # Seq2seq model
        self.seq_to_seq_model = AutoModelForSeq2SeqLM.from_pretrained(
            self.seq_to_seq_name, load_in_4bit=True, device_map="auto"
        )

    def tearDown(self):
        del self.base_model
        del self.sequence_model
        del self.model_4bit
        del self.seq_to_seq_model

        gc.collect()
        torch.cuda.empty_cache()

    def test_correct_head_class(self):
        from bitsandbytes.nn import Params4bit

        self.assertTrue(self.base_model.h[-1].mlp.dense_4h_to_h.weight.__class__ == Params4bit)

        # Other heads should be nn.Parameter
        self.assertTrue(self.model_4bit.lm_head.weight.__class__ == torch.nn.Parameter)
        self.assertTrue(self.sequence_model.score.weight.__class__ == torch.nn.Parameter)
        self.assertTrue(self.seq_to_seq_model.lm_head.weight.__class__ == torch.nn.Parameter)


class Pipeline4BitTest(Base4bitTest):
    def setUp(self):
        super().setUp()

    def tearDown(self):
        del self.pipe

        gc.collect()
        torch.cuda.empty_cache()

    def test_pipeline(self):
        self.pipe = pipeline(
            "text-generation",
            model=self.model_name,
            model_kwargs={"device_map": "auto", "load_in_4bit": True, "torch_dtype": torch.float16},
            max_new_tokens=self.MAX_NEW_TOKENS,
        )

        # Real second forward pass
        pipeline_output = self.pipe(self.input_text)
        self.assertIn(pipeline_output[0]["generated_text"], self.EXPECTED_OUTPUTS)


@require_torch_multi_gpu
class Bnb4bitTestMultiGpu(Base4bitTest):
    def setUp(self):
        super().setUp()

    def test_multi_gpu_loading(self):
        model_parallel = AutoModelForCausalLM.from_pretrained(
            self.model_name, load_in_4bit=True, device_map="balanced"
        )

        # Check correct device map
        self.assertEqual(set(model_parallel.hf_device_map.values()), {0, 1})

        # Check that inference pass works on the model
        encoded_input = self.tokenizer(self.input_text, return_tensors="pt")

        # Second real batch
        output_parallel = model_parallel.generate(input_ids=encoded_input["input_ids"].to(0), max_new_tokens=10)
        self.assertIn(self.tokenizer.decode(output_parallel[0], skip_special_tokens=True), self.EXPECTED_OUTPUTS)


class Bnb4BitTestTraining(Base4bitTest):
    def setUp(self):
        self.model_name = "facebook/opt-350m"
        super().setUp()

    def test_training(self):
        if version.parse(importlib.metadata.version("bitsandbytes")) < version.parse("0.37.0"):
            return

        # Step 1: freeze all parameters
        model = AutoModelForCausalLM.from_pretrained(self.model_name, load_in_4bit=True)

        self.assertEqual(set(model.hf_device_map.values()), {torch.cuda.current_device()})

        for param in model.parameters():
            param.requires_grad = False  # freeze the model - train adapters later
            if param.ndim == 1:
                # cast the small parameters (e.g. layernorm) to fp32 for stability
                param.data = param.data.to(torch.float32)

        # Step 2: add adapters
        for _, module in model.named_modules():
            if "OPTAttention" in repr(type(module)):
                module.q_proj = LoRALayer(module.q_proj, rank=16)
                module.k_proj = LoRALayer(module.k_proj, rank=16)
                module.v_proj = LoRALayer(module.v_proj, rank=16)

        # Step 3: dummy batch
        batch = self.tokenizer("Test batch ", return_tensors="pt").to(0)

        # Step 4: Check if the gradient is not None
        with torch.cuda.amp.autocast():
            out = model.forward(**batch)
            out.logits.norm().backward()

        for module in model.modules():
            if isinstance(module, LoRALayer):
                self.assertTrue(module.adapter[1].weight.grad is not None)
                self.assertTrue(module.adapter[1].weight.grad.norm().item() > 0)
            elif isinstance(module, nn.Embedding):
                self.assertTrue(module.weight.grad is None)


class Bnb4BitGPT2Test(Bnb4BitTest):
    model_name = "gpt2-xl"
    EXPECTED_RELATIVE_DIFFERENCE = 3.3191854854152187
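

# Note: EXPECTED_RELATIVE_DIFFERENCE above is the fp16-to-4bit memory
# footprint ratio. It stays well below 4x because modules such as the
# embeddings and the lm_head are kept in higher precision.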
| 365 |
from __future__ import annotations


class BoyerMooreSearch:
    def __init__(self, text: str, pattern: str):
        self.text, self.pattern = text, pattern
        self.textLen, self.patLen = len(text), len(pattern)

    def match_in_pattern(self, char: str) -> int:
        """Find the rightmost index of `char` in the pattern, or -1 if absent."""
        for i in range(self.patLen - 1, -1, -1):
            if char == self.pattern[i]:
                return i
        return -1

    def mismatch_in_text(self, current_pos: int) -> int:
        """Find the rightmost mismatch position in the text window, or -1 on a full match."""
        for i in range(self.patLen - 1, -1, -1):
            if self.pattern[i] != self.text[current_pos + i]:
                return current_pos + i
        return -1

    def bad_character_heuristic(self) -> list[int]:
        # searches pattern in text and returns index positions
        positions = []
        for i in range(self.textLen - self.patLen + 1):
            mismatch_index = self.mismatch_in_text(i)
            if mismatch_index == -1:
                positions.append(i)
            else:
                match_index = self.match_in_pattern(self.text[mismatch_index])
                i = (
                    mismatch_index - match_index
                )  # shifting index lgtm [py/multiple-definition]
        return positions


text = "ABAABA"
pattern = "AB"
bms = BoyerMooreSearch(text, pattern)
positions = bms.bad_character_heuristic()

if len(positions) == 0:
    print("No match found")
else:
    print("Pattern found in following positions: ")
    print(positions)
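
# Expected output for the example above (0-indexed match positions):
#     Pattern found in following positions:
#     [0, 3]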
| 110 | 0 |
def is_ip_v4_address_valid(ip_v4_address: str) -> bool:
    """Return True if the string has four dotted decimal octets in range."""
    octets = [int(i) for i in ip_v4_address.split(".") if i.isdigit()]
    # note: octets are capped at 254 here, mirroring the original implementation
    return len(octets) == 4 and all(0 <= octet <= 254 for octet in octets)


if __name__ == "__main__":
    ip = input().strip()
    valid_or_invalid = "valid" if is_ip_v4_address_valid(ip) else "invalid"
    print(f"{ip} is a {valid_or_invalid} IP v4 address.")
| 68 |
from __future__ import annotations

import collections
import pprint
from pathlib import Path


def signature(word: str) -> str:
    """Return the word with its letters sorted, used as an anagram signature."""
    return "".join(sorted(word))


def anagram(my_word: str) -> list[str]:
    """Return every known anagram of the given word."""
    return word_by_signature[signature(my_word)]


data = Path(__file__).parent.joinpath("words.txt").read_text(encoding="utf-8")
word_list = sorted({word.strip().lower() for word in data.splitlines()})

word_by_signature = collections.defaultdict(list)
for word in word_list:
    word_by_signature[signature(word)].append(word)

if __name__ == "__main__":
    all_anagrams = {word: anagram(word) for word in word_list if len(anagram(word)) > 1}
    with open("anagrams.txt", "w") as file:
        file.write("all_anagrams = \n ")
        file.write(pprint.pformat(all_anagrams))
| 39 | 0 |
import unittest
from pathlib import Path
from tempfile import TemporaryDirectory

from transformers import AutoConfig, TFGPT2LMHeadModel, is_keras_nlp_available, is_tf_available
from transformers.models.gpt2.tokenization_gpt2 import GPT2Tokenizer
from transformers.testing_utils import require_keras_nlp, require_tf, slow


if is_tf_available():
    import tensorflow as tf

if is_keras_nlp_available():
    from transformers.models.gpt2 import TFGPT2Tokenizer


TOKENIZER_CHECKPOINTS = ["gpt2"]
TINY_MODEL_CHECKPOINT = "gpt2"

if is_tf_available():

    class ModelToSave(tf.Module):
        def __init__(self, tokenizer):
            super().__init__()
            self.tokenizer = tokenizer
            config = AutoConfig.from_pretrained(TINY_MODEL_CHECKPOINT)
            self.model = TFGPT2LMHeadModel.from_config(config)

        @tf.function(input_signature=(tf.TensorSpec((None,), tf.string, name="text"),))
        def serving(self, text):
            tokenized = self.tokenizer(text)
            input_ids_dense = tokenized["input_ids"].to_tensor()

            # token id 0 marks padding in the dense tensor: `.to_tensor()` pads
            # ragged rows with zeros by default
            input_mask = tf.cast(input_ids_dense > 0, tf.int32)
            # input_mask = tf.reshape(input_mask, [-1, MAX_SEQ_LEN])

            outputs = self.model(input_ids=input_ids_dense, attention_mask=input_mask)["logits"]

            return outputs


@require_tf
@require_keras_nlp
class GPTTokenizationTest(unittest.TestCase):
    def setUp(self):
        super().setUp()

        self.tokenizers = [GPT2Tokenizer.from_pretrained(checkpoint) for checkpoint in (TOKENIZER_CHECKPOINTS)]
        self.tf_tokenizers = [TFGPT2Tokenizer.from_pretrained(checkpoint) for checkpoint in TOKENIZER_CHECKPOINTS]
        assert len(self.tokenizers) == len(self.tf_tokenizers)

        self.test_sentences = [
            "This is a straightforward English test sentence.",
            "This one has some weird characters\rto\nsee\r\nif those\u00E9break things.",
            "Now we're going to add some Chinese: 一 二 三 一二三",
            "And some much more rare Chinese: 齉 堃 齉堃",
            "Je vais aussi écrire en français pour tester les accents",
            "Classical Irish also has some unusual characters, so in they go: Gaelaċ, ꝼ",
        ]
        self.paired_sentences = list(zip(self.test_sentences, self.test_sentences[::-1]))

    def test_output_equivalence(self):
        for tokenizer, tf_tokenizer in zip(self.tokenizers, self.tf_tokenizers):
            for test_inputs in self.test_sentences:
                python_outputs = tokenizer([test_inputs], return_tensors="tf")
                tf_outputs = tf_tokenizer([test_inputs])

                for key in python_outputs.keys():
                    # convert them to numpy to avoid messing with ragged tensors
                    python_outputs_values = python_outputs[key].numpy()
                    tf_outputs_values = tf_outputs[key].numpy()

                    self.assertTrue(tf.reduce_all(python_outputs_values.shape == tf_outputs_values.shape))
                    self.assertTrue(tf.reduce_all(tf.cast(python_outputs_values, tf.int64) == tf_outputs_values))

    @slow
    def test_graph_mode(self):
        for tf_tokenizer in self.tf_tokenizers:
            compiled_tokenizer = tf.function(tf_tokenizer)
            for test_inputs in self.test_sentences:
                test_inputs = tf.constant(test_inputs)
                compiled_outputs = compiled_tokenizer(test_inputs)
                eager_outputs = tf_tokenizer(test_inputs)

                for key in eager_outputs.keys():
                    self.assertTrue(tf.reduce_all(eager_outputs[key] == compiled_outputs[key]))

    @slow
    def test_saved_model(self):
        for tf_tokenizer in self.tf_tokenizers:
            model = ModelToSave(tokenizer=tf_tokenizer)
            test_inputs = tf.convert_to_tensor([self.test_sentences[0]])
            out = model.serving(test_inputs)  # Build model with some sample inputs
            with TemporaryDirectory() as tempdir:
                save_path = Path(tempdir) / "saved.model"
                tf.saved_model.save(model, save_path, signatures={"serving_default": model.serving})
                loaded_model = tf.saved_model.load(save_path)
                loaded_output = loaded_model.signatures["serving_default"](test_inputs)["output_0"]
                # We may see small differences because the loaded model is compiled, so we need an epsilon for the test
                self.assertTrue(tf.reduce_all(out == loaded_output))

    @slow
    def test_from_config(self):
        for tf_tokenizer in self.tf_tokenizers:
            test_inputs = tf.convert_to_tensor([self.test_sentences[0]])
            out = tf_tokenizer(test_inputs)  # Build model with some sample inputs
            config = tf_tokenizer.get_config()
            model_from_config = TFGPT2Tokenizer.from_config(config)
            from_config_output = model_from_config(test_inputs)

            for key in from_config_output.keys():
                self.assertTrue(tf.reduce_all(from_config_output[key] == out[key]))

    @slow
    def test_padding(self):
        for tf_tokenizer in self.tf_tokenizers:
            # for the test to run
            tf_tokenizer.pad_token_id = 123123

            for max_length in [3, 5, 1024]:
                test_inputs = tf.convert_to_tensor([self.test_sentences[0]])
                out = tf_tokenizer(test_inputs, max_length=max_length)

                out_length = out["input_ids"].numpy().shape[1]

                assert out_length == max_length
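

# Hypothetical end-to-end usage of ModelToSave (names and paths are assumed):
#
#     tok = TFGPT2Tokenizer.from_pretrained("gpt2")
#     model = ModelToSave(tokenizer=tok)
#     tf.saved_model.save(model, "export/gpt2", signatures={"serving_default": model.serving})
#     reloaded = tf.saved_model.load("export/gpt2")
#     logits = reloaded.signatures["serving_default"](tf.constant(["Hello"]))["output_0"]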
| 207 |
import gc
import unittest
import numpy as np
import torch
from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNetaDModel
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS, UNCONDITIONAL_AUDIO_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class _UpperCAmelCase ( _UpperCamelCase , unittest.TestCase ):
"""simple docstring"""
a_ = DanceDiffusionPipeline
a_ = UNCONDITIONAL_AUDIO_GENERATION_PARAMS
a_ = PipelineTesterMixin.required_optional_params - {
"""callback""",
"""latents""",
"""callback_steps""",
"""output_type""",
"""num_images_per_prompt""",
}
a_ = UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS
a_ = False
a_ = False
def lowercase ( self : List[Any] ) -> Dict:
torch.manual_seed(0 )
__lowerCAmelCase = UNetaDModel(
block_out_channels=(3_2, 3_2, 6_4) , extra_in_channels=1_6 , sample_size=5_1_2 , sample_rate=1_6_0_0_0 , in_channels=2 , out_channels=2 , flip_sin_to_cos=lowerCAmelCase_ , use_timestep_embedding=lowerCAmelCase_ , time_embedding_type='fourier' , mid_block_type='UNetMidBlock1D' , down_block_types=('DownBlock1DNoSkip', 'DownBlock1D', 'AttnDownBlock1D') , up_block_types=('AttnUpBlock1D', 'UpBlock1D', 'UpBlock1DNoSkip') , )
__lowerCAmelCase = IPNDMScheduler()
__lowerCAmelCase = {
'unet': unet,
'scheduler': scheduler,
}
return components
def lowercase ( self : Tuple , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : List[str]=0 ) -> Any:
if str(lowerCAmelCase_ ).startswith('mps' ):
__lowerCAmelCase = torch.manual_seed(lowerCAmelCase_ )
else:
__lowerCAmelCase = torch.Generator(device=lowerCAmelCase_ ).manual_seed(lowerCAmelCase_ )
__lowerCAmelCase = {
'batch_size': 1,
'generator': generator,
'num_inference_steps': 4,
}
return inputs
def lowercase ( self : Union[str, Any] ) -> int:
__lowerCAmelCase = 'cpu' # ensure determinism for the device-dependent torch.Generator
__lowerCAmelCase = self.get_dummy_components()
__lowerCAmelCase = DanceDiffusionPipeline(**lowerCAmelCase_ )
__lowerCAmelCase = pipe.to(lowerCAmelCase_ )
pipe.set_progress_bar_config(disable=lowerCAmelCase_ )
__lowerCAmelCase = self.get_dummy_inputs(lowerCAmelCase_ )
__lowerCAmelCase = pipe(**lowerCAmelCase_ )
__lowerCAmelCase = output.audios
__lowerCAmelCase = audio[0, -3:, -3:]
assert audio.shape == (1, 2, components["unet"].sample_size)
__lowerCAmelCase = np.array([-0.72_65, 1.00_00, -0.83_88, 0.11_75, 0.94_98, -1.00_00] )
assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1e-2
@skip_mps
def lowercase ( self : Union[str, Any] ) -> Tuple:
return super().test_save_load_local()
@skip_mps
def lowercase ( self : List[str] ) -> Dict:
return super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3 )
@skip_mps
def lowercase ( self : str ) -> List[str]:
return super().test_save_load_optional_components()
@skip_mps
def lowercase ( self : List[Any] ) -> List[str]:
return super().test_attention_slicing_forward_pass()
def lowercase ( self : str ) -> int:
super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
@slow
@require_torch_gpu
class _UpperCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def lowercase ( self : Any ) -> Union[str, Any]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowercase ( self : List[str] ) -> List[str]:
__lowerCAmelCase = torch_device
__lowerCAmelCase = DanceDiffusionPipeline.from_pretrained('harmonai/maestro-150k' )
__lowerCAmelCase = pipe.to(lowerCAmelCase_ )
pipe.set_progress_bar_config(disable=lowerCAmelCase_ )
__lowerCAmelCase = torch.manual_seed(0 )
__lowerCAmelCase = pipe(generator=lowerCAmelCase_ , num_inference_steps=1_0_0 , audio_length_in_s=4.0_96 )
__lowerCAmelCase = output.audios
__lowerCAmelCase = audio[0, -3:, -3:]
assert audio.shape == (1, 2, pipe.unet.sample_size)
__lowerCAmelCase = np.array([-0.01_92, -0.02_31, -0.03_18, -0.00_59, 0.00_02, -0.00_20] )
assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1e-2
def lowercase ( self : Tuple ) -> Dict:
__lowerCAmelCase = torch_device
__lowerCAmelCase = DanceDiffusionPipeline.from_pretrained('harmonai/maestro-150k' , torch_dtype=torch.floataa )
__lowerCAmelCase = pipe.to(lowerCAmelCase_ )
pipe.set_progress_bar_config(disable=lowerCAmelCase_ )
__lowerCAmelCase = torch.manual_seed(0 )
__lowerCAmelCase = pipe(generator=lowerCAmelCase_ , num_inference_steps=1_0_0 , audio_length_in_s=4.0_96 )
__lowerCAmelCase = output.audios
__lowerCAmelCase = audio[0, -3:, -3:]
assert audio.shape == (1, 2, pipe.unet.sample_size)
__lowerCAmelCase = np.array([-0.03_67, -0.04_88, -0.07_71, -0.05_25, -0.04_44, -0.03_41] )
assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1e-2
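For reference, a minimal usage sketch of the pipeline these tests cover, assuming the `harmonai/maestro-150k` checkpoint from the slow tests and a CUDA device:

import torch
from diffusers import DanceDiffusionPipeline

pipe = DanceDiffusionPipeline.from_pretrained("harmonai/maestro-150k").to("cuda")
generator = torch.manual_seed(0)
output = pipe(generator=generator, num_inference_steps=100, audio_length_in_s=4.096)
# output.audios has shape (batch, channels, sample_count)
audio = output.audios[0]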
| 207 | 1 |
from typing import Optional
import numpy as np
import torch
from torch import nn
from transformers import GPTaConfig, GPTaLMHeadModel
from transformers.modeling_utils import ModuleUtilsMixin
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class lowercase ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
lowercase_ : str =[R"""h\.\d+\.attn\.bias""", R"""h\.\d+\.attn\.masked_bias"""]
@register_to_config
def __init__( self ,A__ ,A__ ,A__ = None ,A__ = 5_0_2_5_7 ,A__ = 1_0_2_4 ,A__ = 7_6_8 ,A__ = 1_2 ,A__ = 1_2 ,A__ = None ,A__ = "gelu_new" ,A__ = 0.1 ,A__ = 0.1 ,A__ = 0.1 ,A__ = 1E-5 ,A__ = 0.02 ,A__ = True ,A__ = True ,A__ = False ,A__ = False ,):
super().__init__()
lowercase = prefix_length
if prefix_inner_dim != n_embd and prefix_hidden_dim is None:
raise ValueError(
                f'`prefix_hidden_dim` cannot be `None` when `prefix_inner_dim`: {prefix_inner_dim} and'
                f' `n_embd`: {n_embd} are not equal.')
lowercase = prefix_inner_dim
lowercase = prefix_hidden_dim
lowercase = (
nn.Linear(self.prefix_inner_dim ,self.prefix_hidden_dim)
if self.prefix_hidden_dim is not None
else nn.Identity()
)
lowercase = (
nn.Linear(self.prefix_hidden_dim ,lowerCamelCase__) if self.prefix_hidden_dim is not None else nn.Identity()
)
lowercase = GPTaConfig(
vocab_size=lowerCamelCase__ ,n_positions=lowerCamelCase__ ,n_embd=lowerCamelCase__ ,n_layer=lowerCamelCase__ ,n_head=lowerCamelCase__ ,n_inner=lowerCamelCase__ ,activation_function=lowerCamelCase__ ,resid_pdrop=lowerCamelCase__ ,embd_pdrop=lowerCamelCase__ ,attn_pdrop=lowerCamelCase__ ,layer_norm_epsilon=lowerCamelCase__ ,initializer_range=lowerCamelCase__ ,scale_attn_weights=lowerCamelCase__ ,use_cache=lowerCamelCase__ ,scale_attn_by_inverse_layer_idx=lowerCamelCase__ ,reorder_and_upcast_attn=lowerCamelCase__ ,)
lowercase = GPTaLMHeadModel(lowerCamelCase__)
def A__ ( self ,A__ ,A__ ,A__ = None ,A__ = None ,):
lowercase = self.transformer.transformer.wte(lowerCamelCase__)
lowercase = self.encode_prefix(lowerCamelCase__)
lowercase = self.decode_prefix(lowerCamelCase__)
lowercase = torch.cat((prefix_embeds, embedding_text) ,dim=1)
if labels is not None:
lowercase = self.get_dummy_token(input_ids.shape[0] ,input_ids.device)
lowercase = torch.cat((dummy_token, input_ids) ,dim=1)
lowercase = self.transformer(inputs_embeds=lowerCamelCase__ ,labels=lowerCamelCase__ ,attention_mask=lowerCamelCase__)
if self.prefix_hidden_dim is not None:
return out, hidden
else:
return out
def A__ ( self ,A__ ,A__):
return torch.zeros(lowerCamelCase__ ,self.prefix_length ,dtype=torch.intaa ,device=lowerCamelCase__)
def A__ ( self ,A__):
return self.encode_prefix(lowerCamelCase__)
@torch.no_grad()
def A__ ( self ,A__ ,A__ ,A__):
lowercase = torch.split(lowerCamelCase__ ,1 ,dim=0)
lowercase = []
lowercase = []
for feature in features:
lowercase = self.decode_prefix(feature.to(lowerCamelCase__)) # back to the clip feature
# Only support beam search for now
            output_tokens , seq_lengths = self.generate_beam(
                input_embeds=lowerCamelCase__ ,device=lowerCamelCase__ ,eos_token_id=lowerCamelCase__)
generated_tokens.append(output_tokens[0])
generated_seq_lengths.append(seq_lengths[0])
lowercase = torch.stack(lowerCamelCase__)
lowercase = torch.stack(lowerCamelCase__)
return generated_tokens, generated_seq_lengths
@torch.no_grad()
def A__ ( self ,A__=None ,A__=None ,A__=None ,A__ = 5 ,A__ = 6_7 ,A__ = 1.0 ,A__ = None ,):
lowercase = eos_token_id
lowercase = None
lowercase = None
lowercase = torch.ones(lowerCamelCase__ ,device=lowerCamelCase__ ,dtype=torch.int)
lowercase = torch.zeros(lowerCamelCase__ ,device=lowerCamelCase__ ,dtype=torch.bool)
if input_embeds is not None:
lowercase = input_embeds
else:
lowercase = self.transformer.transformer.wte(lowerCamelCase__)
for i in range(lowerCamelCase__):
lowercase = self.transformer(inputs_embeds=lowerCamelCase__)
lowercase = outputs.logits
lowercase = logits[:, -1, :] / (temperature if temperature > 0 else 1.0)
lowercase = logits.softmax(-1).log()
if scores is None:
lowercase , lowercase = logits.topk(lowerCamelCase__ ,-1)
lowercase = generated.expand(lowerCamelCase__ ,*generated.shape[1:])
lowercase , lowercase = next_tokens.permute(1 ,0), scores.squeeze(0)
if tokens is None:
lowercase = next_tokens
else:
lowercase = tokens.expand(lowerCamelCase__ ,*tokens.shape[1:])
lowercase = torch.cat((tokens, next_tokens) ,dim=1)
else:
lowercase = -float(np.inf)
lowercase = 0
lowercase = scores[:, None] + logits
seq_lengths[~is_stopped] += 1
lowercase = scores_sum / seq_lengths[:, None]
lowercase , lowercase = scores_sum_average.view(-1).topk(lowerCamelCase__ ,-1)
lowercase = next_tokens // scores_sum.shape[1]
lowercase = seq_lengths[next_tokens_source]
lowercase = next_tokens % scores_sum.shape[1]
lowercase = next_tokens.unsqueeze(1)
lowercase = tokens[next_tokens_source]
lowercase = torch.cat((tokens, next_tokens) ,dim=1)
lowercase = generated[next_tokens_source]
lowercase = scores_sum_average * seq_lengths
lowercase = is_stopped[next_tokens_source]
lowercase = self.transformer.transformer.wte(next_tokens.squeeze()).view(generated.shape[0] ,1 ,-1)
lowercase = torch.cat((generated, next_token_embed) ,dim=1)
lowercase = is_stopped + next_tokens.eq(lowerCamelCase__).squeeze()
if is_stopped.all():
break
lowercase = scores / seq_lengths
lowercase = scores.argsort(descending=lowerCamelCase__)
# tokens tensors are already padded to max_seq_length
lowercase = [tokens[i] for i in order]
lowercase = torch.stack(lowerCamelCase__ ,dim=0)
lowercase = torch.tensor([seq_lengths[i] for i in order] ,dtype=seq_lengths.dtype)
return output_texts, seq_lengths
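The beam update above ranks candidates by length-normalized cumulative log-probability. A self-contained sketch of just that scoring rule (hypothetical helper name, plain tensors, no model in the loop):

import torch

def rank_beams(scores_sum: torch.Tensor, seq_lengths: torch.Tensor, beam_size: int):
    # divide cumulative log-probs by length so longer beams are not penalized
    # merely for having accumulated more (negative) terms
    averaged = scores_sum / seq_lengths[:, None]
    top_scores, flat_idx = averaged.view(-1).topk(beam_size, -1)
    beam_idx = flat_idx // scores_sum.shape[1]  # which beam the candidate extends
    token_idx = flat_idx % scores_sum.shape[1]  # which vocabulary token it appends
    return top_scores, beam_idx, token_idx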
| 101 |
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
EulerAncestralDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionInstructPixaPixPipeline,
UNetaDConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.utils import floats_tensor, load_image, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class A ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , unittest.TestCase ):
lowerCamelCase : Union[str, Any] = StableDiffusionInstructPixaPixPipeline
lowerCamelCase : Dict = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"""height""", """width""", """cross_attention_kwargs"""}
lowerCamelCase : Dict = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
lowerCamelCase : List[Any] = IMAGE_TO_IMAGE_IMAGE_PARAMS
lowerCamelCase : Any = IMAGE_TO_IMAGE_IMAGE_PARAMS
def A__ ( self ) -> List[Any]:
'''simple docstring'''
torch.manual_seed(0 )
lowercase__ = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=8 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , )
lowercase__ = PNDMScheduler(skip_prk_steps=lowerCamelCase__ )
torch.manual_seed(0 )
lowercase__ = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , )
torch.manual_seed(0 )
lowercase__ = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , )
lowercase__ = CLIPTextModel(lowerCamelCase__ )
lowercase__ = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
lowercase__ = {
"""unet""": unet,
"""scheduler""": scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""safety_checker""": None,
"""feature_extractor""": None,
}
return components
def A__ ( self , lowerCamelCase__ , lowerCamelCase__=0 ) -> Optional[Any]:
'''simple docstring'''
lowercase__ = floats_tensor((1, 3, 32, 32) , rng=random.Random(lowerCamelCase__ ) ).to(lowerCamelCase__ )
lowercase__ = image.cpu().permute(0 , 2 , 3 , 1 )[0]
lowercase__ = Image.fromarray(np.uinta(lowerCamelCase__ ) ).convert("""RGB""" )
if str(lowerCamelCase__ ).startswith("""mps""" ):
lowercase__ = torch.manual_seed(lowerCamelCase__ )
else:
lowercase__ = torch.Generator(device=lowerCamelCase__ ).manual_seed(lowerCamelCase__ )
lowercase__ = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""image""": image,
"""generator""": generator,
"""num_inference_steps""": 2,
"""guidance_scale""": 6.0,
"""image_guidance_scale""": 1,
"""output_type""": """numpy""",
}
return inputs
def A__ ( self ) -> str:
'''simple docstring'''
lowercase__ = """cpu""" # ensure determinism for the device-dependent torch.Generator
lowercase__ = self.get_dummy_components()
lowercase__ = StableDiffusionInstructPixaPixPipeline(**lowerCamelCase__ )
lowercase__ = sd_pipe.to(lowerCamelCase__ )
sd_pipe.set_progress_bar_config(disable=lowerCamelCase__ )
lowercase__ = self.get_dummy_inputs(lowerCamelCase__ )
lowercase__ = sd_pipe(**lowerCamelCase__ ).images
lowercase__ = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
lowercase__ = np.array([0.75_26, 0.37_50, 0.45_47, 0.61_17, 0.58_66, 0.50_16, 0.43_27, 0.56_42, 0.48_15] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def A__ ( self ) -> Dict:
'''simple docstring'''
lowercase__ = """cpu""" # ensure determinism for the device-dependent torch.Generator
lowercase__ = self.get_dummy_components()
lowercase__ = StableDiffusionInstructPixaPixPipeline(**lowerCamelCase__ )
lowercase__ = sd_pipe.to(lowerCamelCase__ )
sd_pipe.set_progress_bar_config(disable=lowerCamelCase__ )
lowercase__ = self.get_dummy_inputs(lowerCamelCase__ )
lowercase__ = """french fries"""
lowercase__ = sd_pipe(**lowerCamelCase__ , negative_prompt=lowerCamelCase__ )
lowercase__ = output.images
lowercase__ = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
lowercase__ = np.array([0.75_11, 0.36_42, 0.45_53, 0.62_36, 0.57_97, 0.50_13, 0.43_43, 0.56_11, 0.48_31] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def A__ ( self ) -> Optional[Any]:
'''simple docstring'''
lowercase__ = """cpu""" # ensure determinism for the device-dependent torch.Generator
lowercase__ = self.get_dummy_components()
lowercase__ = StableDiffusionInstructPixaPixPipeline(**lowerCamelCase__ )
lowercase__ = sd_pipe.to(lowerCamelCase__ )
sd_pipe.set_progress_bar_config(disable=lowerCamelCase__ )
lowercase__ = self.get_dummy_inputs(lowerCamelCase__ )
lowercase__ = [inputs["""prompt"""]] * 2
lowercase__ = np.array(inputs["""image"""] ).astype(np.floataa ) / 2_55.0
lowercase__ = torch.from_numpy(lowerCamelCase__ ).unsqueeze(0 ).to(lowerCamelCase__ )
lowercase__ = image / 2 + 0.5
lowercase__ = image.permute(0 , 3 , 1 , 2 )
lowercase__ = image.repeat(2 , 1 , 1 , 1 )
lowercase__ = sd_pipe(**lowerCamelCase__ ).images
lowercase__ = image[-1, -3:, -3:, -1]
assert image.shape == (2, 32, 32, 3)
lowercase__ = np.array([0.58_12, 0.57_48, 0.52_22, 0.59_08, 0.56_95, 0.71_74, 0.68_04, 0.55_23, 0.55_79] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def A__ ( self ) -> Any:
'''simple docstring'''
lowercase__ = """cpu""" # ensure determinism for the device-dependent torch.Generator
lowercase__ = self.get_dummy_components()
lowercase__ = EulerAncestralDiscreteScheduler(
beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule="""scaled_linear""" )
lowercase__ = StableDiffusionInstructPixaPixPipeline(**lowerCamelCase__ )
lowercase__ = sd_pipe.to(lowerCamelCase__ )
sd_pipe.set_progress_bar_config(disable=lowerCamelCase__ )
lowercase__ = self.get_dummy_inputs(lowerCamelCase__ )
lowercase__ = sd_pipe(**lowerCamelCase__ ).images
lowercase__ = image[0, -3:, -3:, -1]
        lowercase__ = [round(x , 4 ) for x in image_slice.flatten().tolist()]
        print(""",""".join([str(x ) for x in lowercase__] ) )
assert image.shape == (1, 32, 32, 3)
lowercase__ = np.array([0.74_17, 0.38_42, 0.47_32, 0.57_76, 0.58_91, 0.51_39, 0.40_52, 0.56_73, 0.49_86] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def A__ ( self ) -> Optional[Any]:
'''simple docstring'''
super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
def A__ ( self ) -> Tuple:
'''simple docstring'''
lowercase__ = self.get_dummy_components()
lowercase__ = StableDiffusionInstructPixaPixPipeline(**lowerCamelCase__ )
lowercase__ = VaeImageProcessor(do_resize=lowerCamelCase__ , do_normalize=lowerCamelCase__ )
lowercase__ = pipe.to(lowerCamelCase__ )
pipe.set_progress_bar_config(disable=lowerCamelCase__ )
lowercase__ = pipe(**self.get_dummy_inputs_by_type(lowerCamelCase__ , input_image_type="""pt""" ) )[0]
lowercase__ = components["""vae"""]
lowercase__ = self.get_dummy_inputs_by_type(lowerCamelCase__ , input_image_type="""pt""" )
for image_param in self.image_latents_params:
if image_param in inputs.keys():
lowercase__ = vae.encode(inputs[image_param] ).latent_dist.mode()
lowercase__ = pipe(**lowerCamelCase__ )[0]
lowercase__ = np.abs(out - out_latents_inputs ).max()
self.assertLess(lowerCamelCase__ , 1e-4 , """passing latents as image input generate different result from passing image""" )
@slow
@require_torch_gpu
class A ( unittest.TestCase ):
def A__ ( self ) -> Union[str, Any]:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def A__ ( self , lowerCamelCase__=0 ) -> int:
'''simple docstring'''
lowercase__ = torch.manual_seed(lowerCamelCase__ )
lowercase__ = load_image(
"""https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/stable_diffusion_pix2pix/example.jpg""" )
lowercase__ = {
"""prompt""": """turn him into a cyborg""",
"""image""": image,
"""generator""": generator,
"""num_inference_steps""": 3,
"""guidance_scale""": 7.5,
"""image_guidance_scale""": 1.0,
"""output_type""": """numpy""",
}
return inputs
def A__ ( self ) -> str:
'''simple docstring'''
lowercase__ = StableDiffusionInstructPixaPixPipeline.from_pretrained(
"""timbrooks/instruct-pix2pix""" , safety_checker=lowerCamelCase__ )
pipe.to(lowerCamelCase__ )
pipe.set_progress_bar_config(disable=lowerCamelCase__ )
pipe.enable_attention_slicing()
lowercase__ = self.get_inputs()
lowercase__ = pipe(**lowerCamelCase__ ).images
lowercase__ = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 512, 3)
lowercase__ = np.array([0.59_02, 0.60_15, 0.60_27, 0.59_83, 0.60_92, 0.60_61, 0.57_65, 0.57_85, 0.55_55] )
assert np.abs(expected_slice - image_slice ).max() < 1e-3
def A__ ( self ) -> List[Any]:
'''simple docstring'''
lowercase__ = StableDiffusionInstructPixaPixPipeline.from_pretrained(
"""timbrooks/instruct-pix2pix""" , safety_checker=lowerCamelCase__ )
lowercase__ = LMSDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.to(lowerCamelCase__ )
pipe.set_progress_bar_config(disable=lowerCamelCase__ )
pipe.enable_attention_slicing()
lowercase__ = self.get_inputs()
lowercase__ = pipe(**lowerCamelCase__ ).images
lowercase__ = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 512, 3)
lowercase__ = np.array([0.65_78, 0.68_17, 0.69_72, 0.67_61, 0.68_56, 0.69_16, 0.64_28, 0.65_16, 0.63_01] )
assert np.abs(expected_slice - image_slice ).max() < 1e-3
def A__ ( self ) -> str:
'''simple docstring'''
lowercase__ = StableDiffusionInstructPixaPixPipeline.from_pretrained(
"""timbrooks/instruct-pix2pix""" , safety_checker=lowerCamelCase__ )
lowercase__ = DDIMScheduler.from_config(pipe.scheduler.config )
pipe.to(lowerCamelCase__ )
pipe.set_progress_bar_config(disable=lowerCamelCase__ )
pipe.enable_attention_slicing()
lowercase__ = self.get_inputs()
lowercase__ = pipe(**lowerCamelCase__ ).images
lowercase__ = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 512, 3)
lowercase__ = np.array([0.38_28, 0.38_34, 0.38_18, 0.37_92, 0.38_65, 0.37_52, 0.37_92, 0.38_47, 0.37_53] )
assert np.abs(expected_slice - image_slice ).max() < 1e-3
def A__ ( self ) -> int:
'''simple docstring'''
        number_of_steps = 0
def callback_fn(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) -> None:
            callback_fn.has_been_called = True
nonlocal number_of_steps
number_of_steps += 1
if step == 1:
lowercase__ = latents.detach().cpu().numpy()
assert latents.shape == (1, 4, 64, 64)
lowercase__ = latents[0, -3:, -3:, -1]
lowercase__ = np.array([-0.24_63, -0.46_44, -0.97_56, 1.51_76, 1.44_14, 0.78_66, 0.98_97, 0.85_21, 0.79_83] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5e-2
elif step == 2:
lowercase__ = latents.detach().cpu().numpy()
assert latents.shape == (1, 4, 64, 64)
lowercase__ = latents[0, -3:, -3:, -1]
lowercase__ = np.array([-0.26_44, -0.46_26, -0.96_53, 1.51_76, 1.45_51, 0.76_86, 0.98_05, 0.84_52, 0.81_15] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5e-2
        callback_fn.has_been_called = False
lowercase__ = StableDiffusionInstructPixaPixPipeline.from_pretrained(
"""timbrooks/instruct-pix2pix""" , safety_checker=lowerCamelCase__ , torch_dtype=torch.floataa )
lowercase__ = pipe.to(lowerCamelCase__ )
pipe.set_progress_bar_config(disable=lowerCamelCase__ )
pipe.enable_attention_slicing()
lowercase__ = self.get_inputs()
pipe(**lowerCamelCase__ , callback=lowerCamelCase__ , callback_steps=1 )
assert callback_fn.has_been_called
assert number_of_steps == 3
def A__ ( self ) -> Tuple:
'''simple docstring'''
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
lowercase__ = StableDiffusionInstructPixaPixPipeline.from_pretrained(
"""timbrooks/instruct-pix2pix""" , safety_checker=lowerCamelCase__ , torch_dtype=torch.floataa )
lowercase__ = pipe.to(lowerCamelCase__ )
pipe.set_progress_bar_config(disable=lowerCamelCase__ )
pipe.enable_attention_slicing(1 )
pipe.enable_sequential_cpu_offload()
lowercase__ = self.get_inputs()
lowercase__ = pipe(**lowerCamelCase__ )
lowercase__ = torch.cuda.max_memory_allocated()
# make sure that less than 2.2 GB is allocated
assert mem_bytes < 2.2 * 10**9
def A__ ( self ) -> Optional[Any]:
'''simple docstring'''
lowercase__ = self.get_inputs()
# resize to resolution that is divisible by 8 but not 16 or 32
lowercase__ = inputs["""image"""].resize((504, 504) )
lowercase__ = """timbrooks/instruct-pix2pix"""
lowercase__ = StableDiffusionInstructPixaPixPipeline.from_pretrained(
lowerCamelCase__ , safety_checker=lowerCamelCase__ , )
pipe.to(lowerCamelCase__ )
pipe.set_progress_bar_config(disable=lowerCamelCase__ )
pipe.enable_attention_slicing()
lowercase__ = pipe(**lowerCamelCase__ )
lowercase__ = output.images[0]
lowercase__ = image[255:258, 383:386, -1]
assert image.shape == (504, 504, 3)
lowercase__ = np.array([0.27_26, 0.25_29, 0.26_64, 0.26_55, 0.26_41, 0.26_42, 0.25_91, 0.26_49, 0.25_90] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5e-3
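A condensed usage sketch of the pipeline under test, assuming the `timbrooks/instruct-pix2pix` checkpoint from the slow tests and the released class name `StableDiffusionInstructPix2PixPipeline`:

import torch
from diffusers import StableDiffusionInstructPix2PixPipeline
from diffusers.utils import load_image

pipe = StableDiffusionInstructPix2PixPipeline.from_pretrained(
    "timbrooks/instruct-pix2pix", torch_dtype=torch.float16, safety_checker=None
).to("cuda")
image = load_image(
    "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/stable_diffusion_pix2pix/example.jpg"
)
# image_guidance_scale > 1 keeps the edit close to the input image
edited = pipe(
    "turn him into a cyborg", image=image, num_inference_steps=20, image_guidance_scale=1.5
).images[0]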
| 164 | 0 |
from multiprocessing import Lock, Pipe, Process
# lock used to ensure that two processes do not access a pipe at the same time
__UpperCAmelCase = Lock()
def lowercase__ ( __snake_case : int , __snake_case : Optional[Any] , __snake_case : List[str] , __snake_case : Dict , __snake_case : Optional[Any] , __snake_case : Any , __snake_case : List[str] ):
'''simple docstring'''
global process_lock
    # we perform n compare-exchange passes (n = 10, the length of the list
    # sorted in main below); after n passes the list is guaranteed sorted
# we *could* stop early if we are sorted already, but it takes as long to
# find out we are sorted as it does to sort the list with this algorithm
for i in range(0 , 10 ):
if (i + position) % 2 == 0 and r_send is not None:
# send your value to your right neighbor
process_lock.acquire()
r_send[1].send(__snake_case )
process_lock.release()
# receive your right neighbor's value
process_lock.acquire()
UpperCAmelCase_ : List[Any] = rr_cv[0].recv()
process_lock.release()
# take the lower value since you are on the left
UpperCAmelCase_ : Union[str, Any] = min(__snake_case , __snake_case )
elif (i + position) % 2 != 0 and l_send is not None:
# send your value to your left neighbor
process_lock.acquire()
l_send[1].send(__snake_case )
process_lock.release()
# receive your left neighbor's value
process_lock.acquire()
UpperCAmelCase_ : List[Any] = lr_cv[0].recv()
process_lock.release()
# take the higher value since you are on the right
UpperCAmelCase_ : int = max(__snake_case , __snake_case )
# after all swaps are performed, send the values back to main
result_pipe[1].send(__snake_case )
def lowercase__ ( __snake_case : Tuple ):
'''simple docstring'''
UpperCAmelCase_ : Optional[Any] = []
UpperCAmelCase_ : Union[str, Any] = []
# initialize the list of pipes where the values will be retrieved
for _ in arr:
result_pipe.append(Pipe() )
# creates the processes
# the first and last process only have one neighbor so they are made outside
# of the loop
UpperCAmelCase_ : List[Any] = Pipe()
UpperCAmelCase_ : Dict = Pipe()
process_array_.append(
Process(
target=__snake_case , args=(0, arr[0], None, temp_rs, None, temp_rr, result_pipe[0]) , ) )
UpperCAmelCase_ : Optional[int] = temp_rs
UpperCAmelCase_ : Union[str, Any] = temp_rr
for i in range(1 , len(__snake_case ) - 1 ):
UpperCAmelCase_ : List[Any] = Pipe()
UpperCAmelCase_ : Tuple = Pipe()
process_array_.append(
Process(
target=__snake_case , args=(i, arr[i], temp_ls, temp_rs, temp_lr, temp_rr, result_pipe[i]) , ) )
UpperCAmelCase_ : Tuple = temp_rs
UpperCAmelCase_ : Optional[Any] = temp_rr
process_array_.append(
Process(
target=__snake_case , args=(
len(__snake_case ) - 1,
arr[len(__snake_case ) - 1],
temp_ls,
None,
temp_lr,
None,
result_pipe[len(__snake_case ) - 1],
) , ) )
# start the processes
for p in process_array_:
p.start()
# wait for the processes to end and write their values to the list
for p in range(0 , len(__snake_case ) ):
UpperCAmelCase_ : Dict = result_pipe[p][0].recv()
process_array_[p].join()
return arr
def lowercase__ ( ):
'''simple docstring'''
UpperCAmelCase_ : Union[str, Any] = list(range(10 , 0 , -1 ) )
print('Initial List' )
print(*__snake_case )
UpperCAmelCase_ : str = odd_even_transposition(__snake_case )
print('Sorted List\n' )
print(*__snake_case )
if __name__ == "__main__":
main()
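For comparison with the multiprocessing version above, a single-process odd-even transposition sort is just n alternating passes over even- and odd-indexed adjacent pairs:

def odd_even_transposition_single(arr: list) -> list:
    n = len(arr)
    for pass_idx in range(n):
        # even passes compare (0,1), (2,3), ...; odd passes compare (1,2), (3,4), ...
        for i in range(pass_idx % 2, n - 1, 2):
            if arr[i] > arr[i + 1]:
                arr[i], arr[i + 1] = arr[i + 1], arr[i]
    return arr

assert odd_even_transposition_single(list(range(10, 0, -1))) == list(range(1, 11))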
| 145 |
from typing import Any, Dict, List, Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from ..image_utils import load_image
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_OBJECT_DETECTION_MAPPING, MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING
__UpperCAmelCase = logging.get_logger(__name__)
__UpperCAmelCase = Dict[str, Any]
__UpperCAmelCase = List[Prediction]
@add_end_docstrings(_snake_case )
class lowerCamelCase (_snake_case ):
'''simple docstring'''
def __init__( self , *_UpperCamelCase , **_UpperCamelCase ) -> Optional[int]:
super().__init__(*_UpperCamelCase , **_UpperCamelCase )
if self.framework == "tf":
raise ValueError(f"The {self.__class__} is only available in PyTorch." )
requires_backends(self , 'vision' )
self.check_model_type(
dict(MODEL_FOR_OBJECT_DETECTION_MAPPING.items() + MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING.items() ) )
def __UpperCAmelCase ( self , **_UpperCamelCase ) -> Tuple:
UpperCAmelCase_ : str = {}
if "threshold" in kwargs:
UpperCAmelCase_ : Tuple = kwargs['threshold']
return {}, {}, postprocess_kwargs
def __call__( self , *_UpperCamelCase , **_UpperCamelCase ) -> Union[Predictions, List[Prediction]]:
return super().__call__(*_UpperCamelCase , **_UpperCamelCase )
def __UpperCAmelCase ( self , _UpperCamelCase ) -> Dict:
UpperCAmelCase_ : Any = load_image(_UpperCamelCase )
UpperCAmelCase_ : Any = torch.IntTensor([[image.height, image.width]] )
UpperCAmelCase_ : str = self.image_processor(images=[image] , return_tensors='pt' )
if self.tokenizer is not None:
UpperCAmelCase_ : List[str] = self.tokenizer(text=inputs['words'] , boxes=inputs['boxes'] , return_tensors='pt' )
UpperCAmelCase_ : Any = target_size
return inputs
def __UpperCAmelCase ( self , _UpperCamelCase ) -> List[Any]:
UpperCAmelCase_ : Any = model_inputs.pop('target_size' )
UpperCAmelCase_ : Optional[Any] = self.model(**_UpperCamelCase )
UpperCAmelCase_ : Dict = outputs.__class__({'target_size': target_size, **outputs} )
if self.tokenizer is not None:
UpperCAmelCase_ : List[str] = model_inputs['bbox']
return model_outputs
def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase=0.9 ) -> List[str]:
UpperCAmelCase_ : List[Any] = model_outputs['target_size']
if self.tokenizer is not None:
# This is a LayoutLMForTokenClassification variant.
# The OCR got the boxes and the model classified the words.
            height , width = target_size[0].tolist()
def unnormalize(_UpperCamelCase ):
return self._get_bounding_box(
torch.Tensor(
[
(width * bbox[0] / 1_0_0_0),
(height * bbox[1] / 1_0_0_0),
(width * bbox[2] / 1_0_0_0),
(height * bbox[3] / 1_0_0_0),
] ) )
            scores , classes = model_outputs['logits'].squeeze(0 ).softmax(dim=-1 ).max(dim=-1 )
UpperCAmelCase_ : Union[str, Any] = [self.model.config.idalabel[prediction] for prediction in classes.tolist()]
UpperCAmelCase_ : Tuple = [unnormalize(_UpperCamelCase ) for bbox in model_outputs['bbox'].squeeze(0 )]
UpperCAmelCase_ : List[str] = ['score', 'label', 'box']
UpperCAmelCase_ : Any = [dict(zip(_UpperCamelCase , _UpperCamelCase ) ) for vals in zip(scores.tolist() , _UpperCamelCase , _UpperCamelCase ) if vals[0] > threshold]
else:
# This is a regular ForObjectDetectionModel
UpperCAmelCase_ : Union[str, Any] = self.image_processor.post_process_object_detection(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
UpperCAmelCase_ : str = raw_annotations[0]
UpperCAmelCase_ : str = raw_annotation['scores']
UpperCAmelCase_ : Tuple = raw_annotation['labels']
UpperCAmelCase_ : List[Any] = raw_annotation['boxes']
UpperCAmelCase_ : Union[str, Any] = scores.tolist()
UpperCAmelCase_ : int = [self.model.config.idalabel[label.item()] for label in labels]
UpperCAmelCase_ : Any = [self._get_bounding_box(_UpperCamelCase ) for box in boxes]
# {"scores": [...], ...} --> [{"score":x, ...}, ...]
UpperCAmelCase_ : int = ['score', 'label', 'box']
UpperCAmelCase_ : Dict = [
dict(zip(_UpperCamelCase , _UpperCamelCase ) )
for vals in zip(raw_annotation['scores'] , raw_annotation['labels'] , raw_annotation['boxes'] )
]
return annotation
def __UpperCAmelCase ( self , _UpperCamelCase ) -> Dict[str, int]:
if self.framework != "pt":
raise ValueError('The ObjectDetectionPipeline is only available in PyTorch.' )
        xmin , ymin , xmax , ymax = box.int().tolist()
UpperCAmelCase_ : str = {
'xmin': xmin,
'ymin': ymin,
'xmax': xmax,
'ymax': ymax,
}
return bbox
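In practice this class is reached through the `pipeline` factory; a minimal usage sketch, assuming a standard DETR checkpoint such as `facebook/detr-resnet-50`:

from transformers import pipeline

detector = pipeline("object-detection", model="facebook/detr-resnet-50")
predictions = detector("http://images.cocodataset.org/val2017/000000039769.jpg", threshold=0.9)
# each prediction is {"score": float, "label": str, "box": {"xmin", "ymin", "xmax", "ymax"}}
for prediction in predictions:
    print(prediction["label"], round(prediction["score"], 3), prediction["box"])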
| 145 | 1 |
"""simple docstring"""
import argparse
import json
import math
import os
import time
import traceback
import zipfile
from collections import Counter
import requests
def __a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None ) ->Optional[int]:
a__: int = None
if token is not None:
a__: Tuple = {'Accept': 'application/vnd.github+json', 'Authorization': F'Bearer {token}'}
a__: Optional[Any] = F'https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100'
a__: str = requests.get(_SCREAMING_SNAKE_CASE , headers=_SCREAMING_SNAKE_CASE ).json()
a__: str = {}
try:
job_links.update({job['name']: job['html_url'] for job in result['jobs']} )
a__: int = math.ceil((result['total_count'] - 100) / 100 )
for i in range(_SCREAMING_SNAKE_CASE ):
a__: Dict = requests.get(url + F'&page={i + 2}' , headers=_SCREAMING_SNAKE_CASE ).json()
job_links.update({job['name']: job['html_url'] for job in result['jobs']} )
return job_links
except Exception:
print(F'Unknown error, could not fetch links:\n{traceback.format_exc()}' )
return {}
def __a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None ) ->Dict:
a__: Dict = None
if token is not None:
a__: List[str] = {'Accept': 'application/vnd.github+json', 'Authorization': F'Bearer {token}'}
a__: Dict = F'https://api.github.com/repos/huggingface/transformers/actions/runs/{worflow_run_id}/artifacts?per_page=100'
a__: Union[str, Any] = requests.get(_SCREAMING_SNAKE_CASE , headers=_SCREAMING_SNAKE_CASE ).json()
a__: List[Any] = {}
try:
artifacts.update({artifact['name']: artifact['archive_download_url'] for artifact in result['artifacts']} )
a__: Dict = math.ceil((result['total_count'] - 100) / 100 )
for i in range(_SCREAMING_SNAKE_CASE ):
a__: Optional[int] = requests.get(url + F'&page={i + 2}' , headers=_SCREAMING_SNAKE_CASE ).json()
artifacts.update({artifact['name']: artifact['archive_download_url'] for artifact in result['artifacts']} )
return artifacts
except Exception:
print(F'Unknown error, could not fetch links:\n{traceback.format_exc()}' )
return {}
def __a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->str:
a__: List[Any] = None
if token is not None:
a__: Optional[int] = {'Accept': 'application/vnd.github+json', 'Authorization': F'Bearer {token}'}
a__: Union[str, Any] = requests.get(_SCREAMING_SNAKE_CASE , headers=_SCREAMING_SNAKE_CASE , allow_redirects=_SCREAMING_SNAKE_CASE )
a__: Optional[Any] = result.headers['Location']
a__: Optional[int] = requests.get(_SCREAMING_SNAKE_CASE , allow_redirects=_SCREAMING_SNAKE_CASE )
a__: int = os.path.join(_SCREAMING_SNAKE_CASE , F'{artifact_name}.zip' )
with open(_SCREAMING_SNAKE_CASE , 'wb' ) as fp:
fp.write(response.content )
def __a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None ) ->List[Any]:
a__: List[Any] = []
a__: Optional[Any] = []
a__: List[Any] = None
with zipfile.ZipFile(_SCREAMING_SNAKE_CASE ) as z:
for filename in z.namelist():
            if not os.path.isdir(filename ):
# read the file
if filename in ["failures_line.txt", "summary_short.txt", "job_name.txt"]:
                    with z.open(filename ) as f:
for line in f:
a__: Optional[int] = line.decode('UTF-8' ).strip()
if filename == "failures_line.txt":
try:
# `error_line` is the place where `error` occurs
a__: Union[str, Any] = line[: line.index(': ' )]
a__: Union[str, Any] = line[line.index(': ' ) + len(': ' ) :]
errors.append([error_line, error] )
except Exception:
# skip un-related lines
pass
elif filename == "summary_short.txt" and line.startswith('FAILED ' ):
# `test` is the test method that failed
a__: Optional[int] = line[len('FAILED ' ) :]
failed_tests.append(_SCREAMING_SNAKE_CASE )
elif filename == "job_name.txt":
a__: Union[str, Any] = line
if len(_SCREAMING_SNAKE_CASE ) != len(_SCREAMING_SNAKE_CASE ):
raise ValueError(
F'`errors` and `failed_tests` should have the same number of elements. Got {len(_SCREAMING_SNAKE_CASE )} for `errors` '
F'and {len(_SCREAMING_SNAKE_CASE )} for `failed_tests` instead. The test reports in {artifact_zip_path} have some'
' problem.' )
a__: Tuple = None
if job_name and job_links:
a__: Dict = job_links.get(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# A list with elements of the form (line of error, error, failed test)
a__: int = [x + [y] + [job_link] for x, y in zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )]
return result
def __a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None ) ->str:
a__: int = []
    a__: Optional[int] = [os.path.join(_SCREAMING_SNAKE_CASE , p ) for p in os.listdir(_SCREAMING_SNAKE_CASE ) if p.endswith('.zip' )]
for p in paths:
errors.extend(get_errors_from_single_artifact(_SCREAMING_SNAKE_CASE , job_links=_SCREAMING_SNAKE_CASE ) )
return errors
def __a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None ) ->Any:
a__: str = Counter()
counter.update([x[1] for x in logs] )
a__: int = counter.most_common()
a__: Any = {}
for error, count in counts:
if error_filter is None or error not in error_filter:
a__: List[str] = {'count': count, 'failed_tests': [(x[2], x[0]) for x in logs if x[1] == error]}
    a__: Optional[Any] = dict(sorted(r.items() , key=lambda item : item[1]["count"] , reverse=True ) )
return r
def __a ( _SCREAMING_SNAKE_CASE ) ->Union[str, Any]:
a__: List[str] = test.split('::' )[0]
if test.startswith('tests/models/' ):
a__: Dict = test.split('/' )[2]
else:
a__: Any = None
return test
def __a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None ) ->List[str]:
a__: int = [(x[0], x[1], get_model(x[2] )) for x in logs]
a__: List[Any] = [x for x in logs if x[2] is not None]
a__: Optional[Any] = {x[2] for x in logs}
a__: Dict = {}
for test in tests:
a__: Union[str, Any] = Counter()
# count by errors in `test`
counter.update([x[1] for x in logs if x[2] == test] )
a__: Union[str, Any] = counter.most_common()
a__: List[str] = {error: count for error, count in counts if (error_filter is None or error not in error_filter)}
a__: List[Any] = sum(error_counts.values() )
if n_errors > 0:
a__: Any = {'count': n_errors, 'errors': error_counts}
    a__: Optional[int] = dict(sorted(r.items() , key=lambda item : item[1]["count"] , reverse=True ) )
return r
def __a ( _SCREAMING_SNAKE_CASE ) ->Union[str, Any]:
a__: Any = '| no. | error | status |'
a__: Any = '|-:|:-|:-|'
a__: str = [header, sep]
for error in reduced_by_error:
a__: int = reduced_by_error[error]['count']
a__: Tuple = F'| {count} | {error[:100]} | |'
lines.append(_SCREAMING_SNAKE_CASE )
return "\n".join(_SCREAMING_SNAKE_CASE )
def __a ( _SCREAMING_SNAKE_CASE ) ->str:
a__: List[str] = '| model | no. of errors | major error | count |'
a__: str = '|-:|-:|-:|-:|'
a__: int = [header, sep]
for model in reduced_by_model:
a__: Tuple = reduced_by_model[model]['count']
        error , _count = list(reduced_by_model[model]['errors'].items() )[0]
a__: Dict = F'| {model} | {count} | {error[:60]} | {_count} |'
lines.append(_SCREAMING_SNAKE_CASE )
return "\n".join(_SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
lowercase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument('--workflow_run_id', type=str, required=True, help='A GitHub Actions workflow run id.')
parser.add_argument(
'--output_dir',
type=str,
required=True,
help='Where to store the downloaded artifacts and other result files.',
)
parser.add_argument('--token', default=None, type=str, help='A token that has actions:read permission.')
lowercase__ = parser.parse_args()
os.makedirs(args.output_dir, exist_ok=True)
lowercase__ = get_job_links(args.workflow_run_id, token=args.token)
lowercase__ = {}
# To deal with `workflow_call` event, where a job name is the combination of the job names in the caller and callee.
# For example, `PyTorch 1.11 / Model tests (models/albert, single-gpu)`.
if _job_links:
for k, v in _job_links.items():
# This is how GitHub actions combine job names.
if " / " in k:
lowercase__ = k.find(' / ')
lowercase__ = k[index + len(' / ') :]
lowercase__ = v
with open(os.path.join(args.output_dir, 'job_links.json'), 'w', encoding='UTF-8') as fp:
json.dump(job_links, fp, ensure_ascii=False, indent=4)
lowercase__ = get_artifacts_links(args.workflow_run_id, token=args.token)
with open(os.path.join(args.output_dir, 'artifacts.json'), 'w', encoding='UTF-8') as fp:
json.dump(artifacts, fp, ensure_ascii=False, indent=4)
for idx, (name, url) in enumerate(artifacts.items()):
download_artifact(name, url, args.output_dir, args.token)
# Be gentle to GitHub
time.sleep(1)
lowercase__ = get_all_errors(args.output_dir, job_links=job_links)
# `e[1]` is the error
lowercase__ = Counter()
counter.update([e[1] for e in errors])
# print the top 30 most common test errors
lowercase__ = counter.most_common(30)
for item in most_common:
print(item)
with open(os.path.join(args.output_dir, 'errors.json'), 'w', encoding='UTF-8') as fp:
json.dump(errors, fp, ensure_ascii=False, indent=4)
lowercase__ = reduce_by_error(errors)
lowercase__ = reduce_by_model(errors)
lowercase__ = make_github_table(reduced_by_error)
lowercase__ = make_github_table_per_model(reduced_by_model)
with open(os.path.join(args.output_dir, 'reduced_by_error.txt'), 'w', encoding='UTF-8') as fp:
fp.write(sa)
with open(os.path.join(args.output_dir, 'reduced_by_model.txt'), 'w', encoding='UTF-8') as fp:
fp.write(sa)
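The reduction helpers above boil down to counting error strings with `collections.Counter`; a tiny self-contained example of the same aggregation over made-up log entries:

from collections import Counter

logs = [
    ("line a", "AssertionError", "tests/models/bert/test_modeling_bert.py::test_forward"),
    ("line b", "AssertionError", "tests/models/gpt2/test_modeling_gpt2.py::test_forward"),
    ("line c", "OSError", "tests/models/bert/test_modeling_bert.py::test_from_pretrained"),
]
counter = Counter(entry[1] for entry in logs)
# most_common() orders errors by frequency, mirroring the reduce_by_error helper
print(counter.most_common())  # [('AssertionError', 2), ('OSError', 1)]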
| 290 | """simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast
from diffusers import DDIMScheduler, KandinskyInpaintPipeline, KandinskyPriorPipeline, UNetaDConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class __snake_case ( __lowerCAmelCase , unittest.TestCase ):
a__ = KandinskyInpaintPipeline
a__ = ["""prompt""", """image_embeds""", """negative_image_embeds""", """image""", """mask_image"""]
a__ = [
"""prompt""",
"""negative_prompt""",
"""image_embeds""",
"""negative_image_embeds""",
"""image""",
"""mask_image""",
]
a__ = [
"""generator""",
"""height""",
"""width""",
"""latents""",
"""guidance_scale""",
"""negative_prompt""",
"""num_inference_steps""",
"""return_dict""",
"""guidance_scale""",
"""num_images_per_prompt""",
"""output_type""",
"""return_dict""",
]
a__ = False
@property
def lowerCamelCase_ ( self) -> Optional[int]:
'''simple docstring'''
return 32
@property
def lowerCamelCase_ ( self) -> Tuple:
'''simple docstring'''
return 32
@property
def lowerCamelCase_ ( self) -> Dict:
'''simple docstring'''
return self.time_input_dim
@property
def lowerCamelCase_ ( self) -> Dict:
'''simple docstring'''
return self.time_input_dim * 4
@property
def lowerCamelCase_ ( self) -> List[Any]:
'''simple docstring'''
return 1_00
@property
def lowerCamelCase_ ( self) -> List[Any]:
'''simple docstring'''
a__: Optional[int] = XLMRobertaTokenizerFast.from_pretrained('YiYiXu/tiny-random-mclip-base')
return tokenizer
@property
def lowerCamelCase_ ( self) -> Any:
'''simple docstring'''
torch.manual_seed(0)
a__: Dict = MCLIPConfig(
numDims=self.cross_attention_dim , transformerDimensions=self.text_embedder_hidden_size , hidden_size=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=10_05 , )
a__: Optional[Any] = MultilingualCLIP(lowercase)
a__: int = text_encoder.eval()
return text_encoder
@property
def lowerCamelCase_ ( self) -> List[str]:
'''simple docstring'''
torch.manual_seed(0)
a__: Any = {
'in_channels': 9,
# Out channels is double in channels because predicts mean and variance
'out_channels': 8,
'addition_embed_type': 'text_image',
'down_block_types': ('ResnetDownsampleBlock2D', 'SimpleCrossAttnDownBlock2D'),
'up_block_types': ('SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'),
'mid_block_type': 'UNetMidBlock2DSimpleCrossAttn',
'block_out_channels': (self.block_out_channels_a, self.block_out_channels_a * 2),
'layers_per_block': 1,
'encoder_hid_dim': self.text_embedder_hidden_size,
'encoder_hid_dim_type': 'text_image_proj',
'cross_attention_dim': self.cross_attention_dim,
'attention_head_dim': 4,
'resnet_time_scale_shift': 'scale_shift',
'class_embed_type': None,
}
a__: str = UNetaDConditionModel(**lowercase)
return model
@property
def lowerCamelCase_ ( self) -> Union[str, Any]:
'''simple docstring'''
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def lowerCamelCase_ ( self) -> List[Any]:
'''simple docstring'''
torch.manual_seed(0)
a__: Any = VQModel(**self.dummy_movq_kwargs)
return model
def lowerCamelCase_ ( self) -> Any:
'''simple docstring'''
a__: Dict = self.dummy_text_encoder
a__: int = self.dummy_tokenizer
a__: str = self.dummy_unet
a__: Any = self.dummy_movq
a__: Tuple = DDIMScheduler(
num_train_timesteps=10_00 , beta_schedule='linear' , beta_start=0.00085 , beta_end=0.012 , clip_sample=lowercase , set_alpha_to_one=lowercase , steps_offset=1 , prediction_type='epsilon' , thresholding=lowercase , )
a__: Tuple = {
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'unet': unet,
'scheduler': scheduler,
'movq': movq,
}
return components
def lowerCamelCase_ ( self , lowercase , lowercase=0) -> Any:
'''simple docstring'''
a__: List[Any] = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(lowercase)).to(lowercase)
a__: int = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(seed + 1)).to(lowercase)
# create init_image
a__: Optional[int] = floats_tensor((1, 3, 64, 64) , rng=random.Random(lowercase)).to(lowercase)
a__: int = image.cpu().permute(0 , 2 , 3 , 1)[0]
a__: Optional[int] = Image.fromarray(np.uinta(lowercase)).convert('RGB').resize((2_56, 2_56))
# create mask
a__: Tuple = np.ones((64, 64) , dtype=np.floataa)
a__: Optional[Any] = 0
if str(lowercase).startswith('mps'):
a__: str = torch.manual_seed(lowercase)
else:
a__: Dict = torch.Generator(device=lowercase).manual_seed(lowercase)
a__: Optional[int] = {
'prompt': 'horse',
'image': init_image,
'mask_image': mask,
'image_embeds': image_embeds,
'negative_image_embeds': negative_image_embeds,
'generator': generator,
'height': 64,
'width': 64,
'num_inference_steps': 2,
'guidance_scale': 4.0,
'output_type': 'np',
}
return inputs
def lowerCamelCase_ ( self) -> str:
'''simple docstring'''
a__: Optional[Any] = 'cpu'
a__: List[Any] = self.get_dummy_components()
a__: Optional[Any] = self.pipeline_class(**lowercase)
a__: str = pipe.to(lowercase)
pipe.set_progress_bar_config(disable=lowercase)
a__: Optional[int] = pipe(**self.get_dummy_inputs(lowercase))
a__: List[str] = output.images
a__: int = pipe(
**self.get_dummy_inputs(lowercase) , return_dict=lowercase , )[0]
a__: Optional[Any] = image[0, -3:, -3:, -1]
a__: List[Any] = image_from_tuple[0, -3:, -3:, -1]
print(f'image.shape {image.shape}')
assert image.shape == (1, 64, 64, 3)
a__: str = np.array(
[0.8326919, 0.73790467, 0.20918581, 0.9309612, 0.5511791, 0.43713328, 0.5513321, 0.49922934, 0.59497786])
assert (
np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
), f' expected_slice {expected_slice}, but got {image_slice.flatten()}'
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
), f' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'
def lowerCamelCase_ ( self) -> str:
'''simple docstring'''
super().test_inference_batch_single_identical(expected_max_diff=3e-3)
@slow
@require_torch_gpu
class __snake_case ( unittest.TestCase ):
def lowerCamelCase_ ( self) -> Optional[Any]:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCamelCase_ ( self) -> Dict:
'''simple docstring'''
a__: List[Any] = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/kandinsky/kandinsky_inpaint_cat_with_hat_fp16.npy')
a__: int = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/kandinsky/cat.png')
a__: Union[str, Any] = np.ones((7_68, 7_68) , dtype=np.floataa)
a__: int = 0
a__: Optional[int] = 'a hat'
a__: int = KandinskyPriorPipeline.from_pretrained(
'kandinsky-community/kandinsky-2-1-prior' , torch_dtype=torch.floataa)
pipe_prior.to(lowercase)
a__: Any = KandinskyInpaintPipeline.from_pretrained(
'kandinsky-community/kandinsky-2-1-inpaint' , torch_dtype=torch.floataa)
a__: Optional[Any] = pipeline.to(lowercase)
pipeline.set_progress_bar_config(disable=lowercase)
a__: Dict = torch.Generator(device='cpu').manual_seed(0)
a__ , a__: Optional[Any] = pipe_prior(
lowercase , generator=lowercase , num_inference_steps=5 , negative_prompt='' , ).to_tuple()
a__: List[str] = pipeline(
lowercase , image=lowercase , mask_image=lowercase , image_embeds=lowercase , negative_image_embeds=lowercase , generator=lowercase , num_inference_steps=1_00 , height=7_68 , width=7_68 , output_type='np' , )
a__: str = output.images[0]
assert image.shape == (7_68, 7_68, 3)
assert_mean_pixel_difference(lowercase , lowercase)
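A condensed version of the slow test above, showing the two-stage prior + inpaint flow (assuming the `kandinsky-community` checkpoints and the mask convention used in the test, i.e. starting from ones and zeroing out the region to repaint):

import numpy as np
import torch
from diffusers import KandinskyInpaintPipeline, KandinskyPriorPipeline
from diffusers.utils import load_image

pipe_prior = KandinskyPriorPipeline.from_pretrained(
    "kandinsky-community/kandinsky-2-1-prior", torch_dtype=torch.float16
).to("cuda")
pipe = KandinskyInpaintPipeline.from_pretrained(
    "kandinsky-community/kandinsky-2-1-inpaint", torch_dtype=torch.float16
).to("cuda")

init_image = load_image(
    "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/kandinsky/cat.png"
)
mask = np.ones((768, 768), dtype=np.float32)
mask[:250, 250:-250] = 0  # hypothetical region to repaint
image_embeds, negative_image_embeds = pipe_prior("a hat", negative_prompt="").to_tuple()
image = pipe(
    "a hat", image=init_image, mask_image=mask, image_embeds=image_embeds,
    negative_image_embeds=negative_image_embeds, height=768, width=768,
).images[0]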
| 290 | 1 |
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
UNetaDConditionModel,
VideoToVideoSDPipeline,
)
from diffusers.utils import floats_tensor, is_xformers_available, skip_mps
from diffusers.utils.testing_utils import enable_full_determinism, slow, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
@skip_mps
class UpperCAmelCase_ ( UpperCamelCase_ , unittest.TestCase ):
'''simple docstring'''
UpperCamelCase__ : int = VideoToVideoSDPipeline
UpperCamelCase__ : Union[str, Any] = TEXT_GUIDED_IMAGE_VARIATION_PARAMS.union({'''video'''} ) - {'''image''', '''width''', '''height'''}
UpperCamelCase__ : List[Any] = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({'''video'''} ) - {'''image'''}
UpperCamelCase__ : str = PipelineTesterMixin.required_optional_params - {'''latents'''}
UpperCamelCase__ : Optional[Any] = False
# No `output_type`.
UpperCamelCase__ : str = frozenset(
[
'''num_inference_steps''',
'''generator''',
'''latents''',
'''return_dict''',
'''callback''',
'''callback_steps''',
] )
def _A ( self ):
'''simple docstring'''
torch.manual_seed(0 )
__SCREAMING_SNAKE_CASE = UNetaDConditionModel(
block_out_channels=(32, 64, 64, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('CrossAttnDownBlock3D', 'CrossAttnDownBlock3D', 'CrossAttnDownBlock3D', 'DownBlock3D') , up_block_types=('UpBlock3D', 'CrossAttnUpBlock3D', 'CrossAttnUpBlock3D', 'CrossAttnUpBlock3D') , cross_attention_dim=32 , attention_head_dim=4 , )
__SCREAMING_SNAKE_CASE = DDIMScheduler(
beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule='scaled_linear' , clip_sample=_A , set_alpha_to_one=_A , )
torch.manual_seed(0 )
__SCREAMING_SNAKE_CASE = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , sample_size=128 , )
torch.manual_seed(0 )
__SCREAMING_SNAKE_CASE = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , hidden_act='gelu' , projection_dim=512 , )
__SCREAMING_SNAKE_CASE = CLIPTextModel(_A )
__SCREAMING_SNAKE_CASE = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
__SCREAMING_SNAKE_CASE = {
'unet': unet,
'scheduler': scheduler,
'vae': vae,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
}
return components
def _A ( self , _A , _A=0 ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = floats_tensor((1, 3, 3, 32, 32) , rng=random.Random(_A ) ).to(_A )
if str(_A ).startswith('mps' ):
__SCREAMING_SNAKE_CASE = torch.manual_seed(_A )
else:
__SCREAMING_SNAKE_CASE = torch.Generator(device=_A ).manual_seed(_A )
__SCREAMING_SNAKE_CASE = {
'prompt': 'A painting of a squirrel eating a burger',
'video': video,
'generator': generator,
'num_inference_steps': 2,
'guidance_scale': 6.0,
'output_type': 'pt',
}
return inputs
def _A ( self ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = 'cpu' # ensure determinism for the device-dependent torch.Generator
__SCREAMING_SNAKE_CASE = self.get_dummy_components()
__SCREAMING_SNAKE_CASE = VideoToVideoSDPipeline(**_A )
__SCREAMING_SNAKE_CASE = sd_pipe.to(_A )
sd_pipe.set_progress_bar_config(disable=_A )
__SCREAMING_SNAKE_CASE = self.get_dummy_inputs(_A )
__SCREAMING_SNAKE_CASE = 'np'
__SCREAMING_SNAKE_CASE = sd_pipe(**_A ).frames
__SCREAMING_SNAKE_CASE = frames[0][-3:, -3:, -1]
assert frames[0].shape == (32, 32, 3)
__SCREAMING_SNAKE_CASE = np.array([106, 117, 113, 174, 137, 112, 148, 151, 131] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
@unittest.skipIf(
torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , )
def _A ( self ):
'''simple docstring'''
self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=_A , expected_max_diff=5e-3 )
@unittest.skip(reason='Batching needs to be properly figured out first for this pipeline.' )
def _A ( self ):
'''simple docstring'''
pass
@unittest.skip(reason='Batching needs to be properly figured out first for this pipeline.' )
def _A ( self ):
'''simple docstring'''
pass
@unittest.skip(reason='`num_images_per_prompt` argument is not supported for this pipeline.' )
def _A ( self ):
'''simple docstring'''
pass
def _A ( self ):
'''simple docstring'''
return super().test_progress_bar()
@slow
@skip_mps
class UpperCAmelCase_ ( unittest.TestCase ):
'''simple docstring'''
def _A ( self ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = VideoToVideoSDPipeline.from_pretrained('cerspense/zeroscope_v2_XL' , torch_dtype=torch.floataa )
pipe.enable_model_cpu_offload()
# 10 frames
__SCREAMING_SNAKE_CASE = torch.Generator(device='cpu' ).manual_seed(0 )
__SCREAMING_SNAKE_CASE = torch.randn((1, 10, 3, 1_024, 576) , generator=_A )
__SCREAMING_SNAKE_CASE = video.to('cuda' )
__SCREAMING_SNAKE_CASE = 'Spiderman is surfing'
__SCREAMING_SNAKE_CASE = pipe(_A , video=_A , generator=_A , num_inference_steps=3 , output_type='pt' ).frames
__SCREAMING_SNAKE_CASE = np.array([-1.0_4_5_8_9_8_4, -1.1_2_7_9_2_9_7, -0.9_6_6_3_0_8_6, -0.9_1_5_0_3_9_0_6, -0.7_5_0_9_7_6_5_6] )
assert np.abs(video_frames.cpu().numpy()[0, 0, 0, 0, -5:] - expected_array ).sum() < 1e-2
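A compressed version of the slow test above, assuming the `cerspense/zeroscope_v2_XL` checkpoint (a random tensor stands in for a real input clip):

import torch
from diffusers import VideoToVideoSDPipeline

pipe = VideoToVideoSDPipeline.from_pretrained("cerspense/zeroscope_v2_XL", torch_dtype=torch.float16)
pipe.enable_model_cpu_offload()
# video layout: (batch, frames, channels, height, width)
video = torch.randn((1, 10, 3, 1024, 576)).to("cuda")
frames = pipe("Spiderman is surfing", video=video, num_inference_steps=25, output_type="pt").frames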
| 118 |
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision import transforms
from transformers import BitImageProcessor, FocalNetConfig, FocalNetForImageClassification
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
def __lowercase ( a__ ) -> Optional[Any]:
__SCREAMING_SNAKE_CASE = [2, 2, 6, 2] if 'tiny' in model_name else [2, 2, 18, 2]
__SCREAMING_SNAKE_CASE = True if 'large' in model_name or 'huge' in model_name else False
__SCREAMING_SNAKE_CASE = True if 'large' in model_name or 'huge' in model_name else False
__SCREAMING_SNAKE_CASE = True if 'large' in model_name or 'huge' in model_name else False
if "large" in model_name or "xlarge" in model_name or "huge" in model_name:
if "fl3" in model_name:
__SCREAMING_SNAKE_CASE = [3, 3, 3, 3]
__SCREAMING_SNAKE_CASE = [5, 5, 5, 5]
elif "fl4" in model_name:
__SCREAMING_SNAKE_CASE = [4, 4, 4, 4]
__SCREAMING_SNAKE_CASE = [3, 3, 3, 3]
if "tiny" in model_name or "small" in model_name or "base" in model_name:
__SCREAMING_SNAKE_CASE = [3, 3, 3, 3]
if "lrf" in model_name:
__SCREAMING_SNAKE_CASE = [3, 3, 3, 3]
else:
__SCREAMING_SNAKE_CASE = [2, 2, 2, 2]
if "tiny" in model_name:
__SCREAMING_SNAKE_CASE = 96
elif "small" in model_name:
__SCREAMING_SNAKE_CASE = 96
elif "base" in model_name:
__SCREAMING_SNAKE_CASE = 1_28
elif "large" in model_name:
__SCREAMING_SNAKE_CASE = 1_92
elif "xlarge" in model_name:
__SCREAMING_SNAKE_CASE = 2_56
elif "huge" in model_name:
__SCREAMING_SNAKE_CASE = 3_52
# set label information
__SCREAMING_SNAKE_CASE = 'huggingface/label-files'
if "large" in model_name or "huge" in model_name:
__SCREAMING_SNAKE_CASE = 'imagenet-22k-id2label.json'
else:
__SCREAMING_SNAKE_CASE = 'imagenet-1k-id2label.json'
__SCREAMING_SNAKE_CASE = json.load(open(hf_hub_download(a__ , a__ , repo_type='dataset' ) , 'r' ) )
__SCREAMING_SNAKE_CASE = {int(a__ ): v for k, v in idalabel.items()}
__SCREAMING_SNAKE_CASE = {v: k for k, v in idalabel.items()}
__SCREAMING_SNAKE_CASE = FocalNetConfig(
embed_dim=a__ , depths=a__ , focal_levels=a__ , focal_windows=a__ , use_conv_embed=a__ , idalabel=a__ , labelaid=a__ , use_post_layernorm=a__ , use_layerscale=a__ , )
return config
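# Sanity-check example, tracing the branches above: for "focalnet-tiny-lrf" the
# helper yields embed_dim=96, depths=[2, 2, 6, 2], focal_levels=[3, 3, 3, 3]
# and the ImageNet-1k label mapping, e.g.
#     config = get_focalnet_config('focalnet-tiny-lrf')
#     assert config.embed_dim == 96 and config.focal_levels == [3, 3, 3, 3]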
def rename_key(name):
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "embeddings.norm")
    if "layers" in name:
        name = "encoder." + name
    if "encoder.layers" in name:
        name = name.replace("encoder.layers", "encoder.stages")
    if "downsample.proj" in name:
        name = name.replace("downsample.proj", "downsample.projection")
    if "blocks" in name:
        name = name.replace("blocks", "layers")
    if "modulation.f.weight" in name or "modulation.f.bias" in name:
        name = name.replace("modulation.f", "modulation.projection_in")
    if "modulation.h.weight" in name or "modulation.h.bias" in name:
        name = name.replace("modulation.h", "modulation.projection_context")
    if "modulation.proj.weight" in name or "modulation.proj.bias" in name:
        name = name.replace("modulation.proj", "modulation.projection_out")
    if name == "norm.weight":
        name = "layernorm.weight"
    if name == "norm.bias":
        name = "layernorm.bias"
    if "head" in name:
        name = name.replace("head", "classifier")
    else:
        name = "focalnet." + name
    return name
def convert_focalnet_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub=False):
    """Copy/paste/tweak the original checkpoint's weights to the FocalNet structure and verify the conversion."""
    # fmt: off
    model_name_to_url = {
        "focalnet-tiny": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_srf.pth",
        "focalnet-tiny-lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_lrf.pth",
        "focalnet-small": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_srf.pth",
        "focalnet-small-lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_lrf.pth",
        "focalnet-base": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_srf.pth",
        "focalnet-base-lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_lrf.pth",
        "focalnet-large-lrf-fl3": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384.pth",
        "focalnet-large-lrf-fl4": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384_fl4.pth",
        "focalnet-xlarge-lrf-fl3": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384.pth",
        "focalnet-xlarge-lrf-fl4": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384_fl4.pth",
    }
    # fmt: on
    checkpoint_url = model_name_to_url[model_name]
    print("Checkpoint URL: ", checkpoint_url)
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["model"]
    # rename keys
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        state_dict[rename_key(key)] = val
    config = get_focalnet_config(model_name)
    model = FocalNetForImageClassification(config)
    model.eval()
    # load state dict
    model.load_state_dict(state_dict)
    # verify conversion
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    processor = BitImageProcessor(
        do_resize=True,
        size={"shortest_edge": 256},
        resample=PILImageResampling.BILINEAR,
        do_center_crop=True,
        crop_size=224,
        do_normalize=True,
        image_mean=IMAGENET_DEFAULT_MEAN,
        image_std=IMAGENET_DEFAULT_STD,
    )
    image = Image.open(requests.get(url, stream=True).raw)
    inputs = processor(images=image, return_tensors="pt")
    image_transforms = transforms.Compose(
        [
            transforms.Resize(256),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
        ]
    )
    original_pixel_values = image_transforms(image).unsqueeze(0)
    # verify pixel_values
    assert torch.allclose(inputs.pixel_values, original_pixel_values, atol=1e-4)
    outputs = model(**inputs)
    predicted_class_idx = outputs.logits.argmax(-1).item()
    print("Predicted class:", model.config.id2label[predicted_class_idx])
    print("First values of logits:", outputs.logits[0, :3])
    if model_name == "focalnet-tiny":
        expected_slice = torch.tensor([0.2166, -0.4368, 0.2191])
    elif model_name == "focalnet-tiny-lrf":
        expected_slice = torch.tensor([1.1669, 0.0125, -0.1695])
    elif model_name == "focalnet-small":
        expected_slice = torch.tensor([0.4917, -0.0430, 0.1341])
    elif model_name == "focalnet-small-lrf":
        expected_slice = torch.tensor([-0.2588, -0.5342, -0.2331])
    elif model_name == "focalnet-base":
        expected_slice = torch.tensor([-0.1655, -0.4090, -0.1730])
    elif model_name == "focalnet-base-lrf":
        expected_slice = torch.tensor([0.5306, -0.0483, -0.3928])
    assert torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4)
    print("Looks ok!")
    if pytorch_dump_folder_path is not None:
        print(f"Saving model and processor of {model_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)
    if push_to_hub:
        print(f"Pushing model and processor of {model_name} to the hub...")
        model.push_to_hub(f"{model_name}")
        processor.push_to_hub(f"{model_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''focalnet-tiny''',
type=str,
help='''Name of the FocalNet model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''',
action='''store_true''',
help='''Whether to push the model and processor to the hub.''',
)
    args = parser.parse_args()
convert_focalnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
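# A minimal invocation sketch (script filename and output path are illustrative assumptions;
# downloading the checkpoint requires network access to the URLs listed above):
#
#   python convert_focalnet_to_hf_format.py --model_name focalnet-tiny \
#       --pytorch_dump_folder_path ./focalnet-tiny-converted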
| 118 | 1 |
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
import evaluate
import numpy as np
import torch
from datasets import load_dataset
from PIL import Image
from torchvision.transforms import (
CenterCrop,
Compose,
Normalize,
RandomHorizontalFlip,
RandomResizedCrop,
Resize,
ToTensor,
)
import transformers
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
AutoConfig,
AutoImageProcessor,
AutoModelForImageClassification,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
logger = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.31.0")
require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/image-classification/requirements.txt")
MODEL_CONFIG_CLASSES = list(MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
def pil_loader(path: str):
    """Load an image file and convert it to RGB."""
    with open(path, "rb") as f:
        im = Image.open(f)
        return im.convert("RGB")
@dataclass
class DataTrainingArguments:
    """Arguments pertaining to what data we are going to input our model for training and eval."""

    dataset_name: Optional[str] = field(
        default=None,
        metadata={
            "help": "Name of a dataset from the hub (could be your own, possibly private dataset hosted on the hub)."
        },
    )
    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
    )
    train_dir: Optional[str] = field(default=None, metadata={"help": "A folder containing the training data."})
    validation_dir: Optional[str] = field(default=None, metadata={"help": "A folder containing the validation data."})
    train_val_split: Optional[float] = field(
        default=0.15, metadata={"help": "Percent to split off of train for validation."}
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        },
    )

    def __post_init__(self):
        if self.dataset_name is None and (self.train_dir is None and self.validation_dir is None):
            raise ValueError(
                "You must specify either a dataset name from the hub or a train and/or validation directory."
            )
@dataclass
class ModelArguments:
    """Arguments pertaining to which model/config/image processor we are going to fine-tune from."""

    model_name_or_path: str = field(
        default="google/vit-base-patch16-224-in21k",
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"},
    )
    model_type: Optional[str] = field(
        default=None,
        metadata={"help": "If training from scratch, pass a model type from the list: " + ", ".join(MODEL_TYPES)},
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from s3"}
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    image_processor_name: str = field(default=None, metadata={"help": "Name or path of preprocessor config."})
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )
    ignore_mismatched_sizes: bool = field(
        default=False,
        metadata={"help": "Will enable to load a pretrained model whose head dimensions are different."},
    )
def collate_fn(examples):
    """Stack the pixel values and labels of a batch of examples into tensors."""
    pixel_values = torch.stack([example["pixel_values"] for example in examples])
    labels = torch.tensor([example["labels"] for example in examples])
    return {"pixel_values": pixel_values, "labels": labels}
def main():
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()
    # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
    # information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry("run_image_classification", model_args, data_args)
    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        handlers=[logging.StreamHandler(sys.stdout)],
    )
    if training_args.should_log:
        # The default of training_args.log_level is passive, so we set log level at info here to have that default.
        transformers.utils.logging.set_verbosity_info()
    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    transformers.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()
    # Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, "
        + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
    )
    logger.info(f"Training/evaluation parameters {training_args}")
    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
            raise ValueError(
                f"Output directory ({training_args.output_dir}) already exists and is not empty. "
                "Use --overwrite_output_dir to overcome."
            )
        elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
            logger.info(
                f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
            )
    # Set seed before initializing model.
    set_seed(training_args.seed)
    # Initialize our dataset and prepare it for the 'image-classification' task.
    if data_args.dataset_name is not None:
        dataset = load_dataset(
            data_args.dataset_name,
            data_args.dataset_config_name,
            cache_dir=model_args.cache_dir,
            task="image-classification",
            use_auth_token=True if model_args.use_auth_token else None,
        )
    else:
        data_files = {}
        if data_args.train_dir is not None:
            data_files["train"] = os.path.join(data_args.train_dir, "**")
        if data_args.validation_dir is not None:
            data_files["validation"] = os.path.join(data_args.validation_dir, "**")
        dataset = load_dataset(
            "imagefolder",
            data_files=data_files,
            cache_dir=model_args.cache_dir,
            task="image-classification",
        )
    # If we don't have a validation split, split off a percentage of train as validation.
    data_args.train_val_split = None if "validation" in dataset.keys() else data_args.train_val_split
    if isinstance(data_args.train_val_split, float) and data_args.train_val_split > 0.0:
        split = dataset["train"].train_test_split(data_args.train_val_split)
        dataset["train"] = split["train"]
        dataset["validation"] = split["test"]
    # Prepare label mappings.
    # We'll include these in the model's config to get human readable labels in the Inference API.
    labels = dataset["train"].features["labels"].names
    label2id, id2label = {}, {}
    for i, label in enumerate(labels):
        label2id[label] = str(i)
        id2label[str(i)] = label
    # Load the accuracy metric from the datasets package
    metric = evaluate.load("accuracy")

    # Define our compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
    # predictions and label_ids field) and has to return a dictionary string to float.
    def compute_metrics(p):
        return metric.compute(predictions=np.argmax(p.predictions, axis=1), references=p.label_ids)

    config = AutoConfig.from_pretrained(
        model_args.config_name or model_args.model_name_or_path,
        num_labels=len(labels),
        label2id=label2id,
        id2label=id2label,
        finetuning_task="image-classification",
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    model = AutoModelForImageClassification.from_pretrained(
        model_args.model_name_or_path,
        from_tf=bool(".ckpt" in model_args.model_name_or_path),
        config=config,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
        ignore_mismatched_sizes=model_args.ignore_mismatched_sizes,
    )
    image_processor = AutoImageProcessor.from_pretrained(
        model_args.image_processor_name or model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    # Define torchvision transforms to be applied to each image.
    if "shortest_edge" in image_processor.size:
        size = image_processor.size["shortest_edge"]
    else:
        size = (image_processor.size["height"], image_processor.size["width"])
    normalize = Normalize(mean=image_processor.image_mean, std=image_processor.image_std)
    _train_transforms = Compose(
        [
            RandomResizedCrop(size),
            RandomHorizontalFlip(),
            ToTensor(),
            normalize,
        ]
    )
    _val_transforms = Compose(
        [
            Resize(size),
            CenterCrop(size),
            ToTensor(),
            normalize,
        ]
    )

    def train_transforms(example_batch):
        """Apply _train_transforms across a batch."""
        example_batch["pixel_values"] = [
            _train_transforms(pil_img.convert("RGB")) for pil_img in example_batch["image"]
        ]
        return example_batch

    def val_transforms(example_batch):
        """Apply _val_transforms across a batch."""
        example_batch["pixel_values"] = [_val_transforms(pil_img.convert("RGB")) for pil_img in example_batch["image"]]
        return example_batch

    if training_args.do_train:
        if "train" not in dataset:
            raise ValueError("--do_train requires a train dataset")
        if data_args.max_train_samples is not None:
            dataset["train"] = (
                dataset["train"].shuffle(seed=training_args.seed).select(range(data_args.max_train_samples))
            )
        # Set the training transforms
        dataset["train"].set_transform(train_transforms)
    if training_args.do_eval:
        if "validation" not in dataset:
            raise ValueError("--do_eval requires a validation dataset")
        if data_args.max_eval_samples is not None:
            dataset["validation"] = (
                dataset["validation"].shuffle(seed=training_args.seed).select(range(data_args.max_eval_samples))
            )
        # Set the validation transforms
        dataset["validation"].set_transform(val_transforms)
    # Initialize our trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=dataset["train"] if training_args.do_train else None,
        eval_dataset=dataset["validation"] if training_args.do_eval else None,
        compute_metrics=compute_metrics,
        tokenizer=image_processor,
        data_collator=collate_fn,
    )
    # Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        trainer.save_model()
        trainer.log_metrics("train", train_result.metrics)
        trainer.save_metrics("train", train_result.metrics)
        trainer.save_state()
    # Evaluation
    if training_args.do_eval:
        metrics = trainer.evaluate()
        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)
    # Write model card and (optionally) push to hub
    kwargs = {
        "finetuned_from": model_args.model_name_or_path,
        "tasks": "image-classification",
        "dataset": data_args.dataset_name,
        "tags": ["image-classification", "vision"],
    }
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs)
    else:
        trainer.create_model_card(**kwargs)
if __name__ == "__main__":
main()
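# A minimal launch sketch (dataset name and output directory are illustrative assumptions):
#
#   python run_image_classification.py --dataset_name beans --output_dir ./beans-outputs \
#       --do_train --do_eval --remove_unused_columns False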
| 169 |
from pickle import UnpicklingError
import jax
import jax.numpy as jnp
import numpy as np
from flax.serialization import from_bytes
from flax.traverse_util import flatten_dict
from ..utils import logging
logger = logging.get_logger(__name__)
def load_flax_checkpoint_in_pytorch_model(pt_model, model_file):
    """Load a serialized Flax checkpoint from `model_file` into the PyTorch model `pt_model`."""
    try:
        with open(model_file, "rb") as flax_state_f:
            flax_state = from_bytes(None, flax_state_f.read())
    except UnpicklingError as e:
        try:
            with open(model_file) as f:
                if f.read().startswith("version"):
                    raise OSError(
                        "You seem to have cloned a repository without having git-lfs installed. Please"
                        " install git-lfs and run `git lfs install` followed by `git lfs pull` in the"
                        " folder you cloned."
                    )
                else:
                    raise ValueError from e
        except (UnicodeDecodeError, ValueError):
            raise EnvironmentError(f"Unable to convert {model_file} to Flax deserializable object. ")
    return load_flax_weights_in_pytorch_model(pt_model, flax_state)
def load_flax_weights_in_pytorch_model(pt_model, flax_state):
    """Load Flax weights into a PyTorch model."""
    try:
        import torch  # noqa: F401
    except ImportError:
        logger.error(
            "Loading Flax weights in PyTorch requires both PyTorch and Flax to be installed. Please see"
            " https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation"
            " instructions."
        )
        raise
    # check if we have bf16 weights
    is_type_bf16 = flatten_dict(jax.tree_util.tree_map(lambda x: x.dtype == jnp.bfloat16, flax_state)).values()
    if any(is_type_bf16):
        # convert all weights to fp32 if they are bf16 since torch.from_numpy can-not handle bf16
        # and bf16 is not fully supported in PT yet.
        logger.warning(
            "Found ``bfloat16`` weights in Flax model. Casting all ``bfloat16`` weights to ``float32`` "
            "before loading those in PyTorch model."
        )
        flax_state = jax.tree_util.tree_map(
            lambda params: params.astype(np.float32) if params.dtype == jnp.bfloat16 else params, flax_state
        )
    pt_model.base_model_prefix = ""
    flax_state_dict = flatten_dict(flax_state, sep=".")
    pt_model_dict = pt_model.state_dict()
    # keep track of unexpected & missing keys
    unexpected_keys = []
    missing_keys = set(pt_model_dict.keys())
    for flax_key_tuple, flax_tensor in flax_state_dict.items():
        flax_key_tuple_array = flax_key_tuple.split(".")
        if flax_key_tuple_array[-1] == "kernel" and flax_tensor.ndim == 4:
            flax_key_tuple_array = flax_key_tuple_array[:-1] + ["weight"]
            flax_tensor = jnp.transpose(flax_tensor, (3, 2, 0, 1))
        elif flax_key_tuple_array[-1] == "kernel":
            flax_key_tuple_array = flax_key_tuple_array[:-1] + ["weight"]
            flax_tensor = flax_tensor.T
        elif flax_key_tuple_array[-1] == "scale":
            flax_key_tuple_array = flax_key_tuple_array[:-1] + ["weight"]
        if "time_embedding" not in flax_key_tuple_array:
            for i, flax_key_tuple_string in enumerate(flax_key_tuple_array):
                flax_key_tuple_array[i] = (
                    flax_key_tuple_string.replace("_0", ".0")
                    .replace("_1", ".1")
                    .replace("_2", ".2")
                    .replace("_3", ".3")
                    .replace("_4", ".4")
                    .replace("_5", ".5")
                    .replace("_6", ".6")
                    .replace("_7", ".7")
                    .replace("_8", ".8")
                    .replace("_9", ".9")
                )
        flax_key = ".".join(flax_key_tuple_array)
        if flax_key in pt_model_dict:
            if flax_tensor.shape != pt_model_dict[flax_key].shape:
                raise ValueError(
                    f"Flax checkpoint seems to be incorrect. Weight {flax_key_tuple} was expected "
                    f"to be of shape {pt_model_dict[flax_key].shape}, but is {flax_tensor.shape}."
                )
            else:
                # add weight to pytorch dict
                flax_tensor = np.asarray(flax_tensor) if not isinstance(flax_tensor, np.ndarray) else flax_tensor
                pt_model_dict[flax_key] = torch.from_numpy(flax_tensor)
                # remove from missing keys
                missing_keys.remove(flax_key)
        else:
            # weight is not expected by PyTorch model
            unexpected_keys.append(flax_key)
    pt_model.load_state_dict(pt_model_dict)
    # re-transform missing_keys to list
    missing_keys = list(missing_keys)
    if len(unexpected_keys) > 0:
        logger.warning(
            "Some weights of the Flax model were not used when initializing the PyTorch model"
            f" {pt_model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are initializing"
            f" {pt_model.__class__.__name__} from a Flax model trained on another task or with another architecture"
            " (e.g. initializing a BertForSequenceClassification model from a FlaxBertForPreTraining model).\n- This"
            f" IS NOT expected if you are initializing {pt_model.__class__.__name__} from a Flax model that you expect"
            " to be exactly identical (e.g. initializing a BertForSequenceClassification model from a"
            " FlaxBertForSequenceClassification model)."
        )
    if len(missing_keys) > 0:
        logger.warning(
            f"Some weights of {pt_model.__class__.__name__} were not initialized from the Flax model and are newly"
            f" initialized: {missing_keys}\nYou should probably TRAIN this model on a down-stream task to be able to"
            " use it for predictions and inference."
        )
    return pt_model
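# A usage sketch (the model class and checkpoint filename are illustrative assumptions):
#
#   from diffusers import UNet2DModel
#   pt_model = UNet2DModel.from_config("config.json")  # same architecture as the Flax checkpoint
#   pt_model = load_flax_checkpoint_in_pytorch_model(pt_model, "diffusion_flax_model.msgpack")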
| 110 | 0 |
import unittest
from transformers import AutoTokenizer, is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, slow
if is_flax_available():
import jax.numpy as jnp
from transformers import FlaxXLMRobertaModel
@require_sentencepiece
@require_tokenizers
@require_flax
class FlaxXLMRobertaModelIntegrationTest(unittest.TestCase):
    @slow
    def test_flax_xlm_roberta_base(self):
        model = FlaxXLMRobertaModel.from_pretrained("xlm-roberta-base")
        tokenizer = AutoTokenizer.from_pretrained("xlm-roberta-base")
        text = "The dog is cute and lives in the garden house"
        input_ids = jnp.array([tokenizer.encode(text)])
        expected_output_shape = (1, 12, 768)  # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = jnp.array(
            [[-0.0101, 0.1218, -0.0803, 0.0801, 0.1327, 0.0776, -0.1215, 0.2383, 0.3338, 0.3106, 0.0300, 0.0252]]
        )
        output = model(input_ids)["last_hidden_state"]
        self.assertEqual(output.shape, expected_output_shape)
        # compare the actual values for a slice of last dim
        self.assertTrue(jnp.allclose(output[:, :, -1], expected_output_values_last_dim, atol=1e-3))
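# This integration test downloads the full checkpoint, so by the usual transformers
# convention it only runs in the slow suite, e.g. (test file path is an assumption):
#   RUN_SLOW=1 python -m pytest tests/models/xlm_roberta/test_modeling_flax_xlm_roberta.py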
| 351 |
import json
import os
from typing import Optional, Tuple
import regex as re
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {
'vocab_file': 'vocab.json',
'merges_file': 'merges.txt',
}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {'ctrl': 'https://raw.githubusercontent.com/salesforce/ctrl/master/ctrl-vocab.json'},
'merges_file': {'ctrl': 'https://raw.githubusercontent.com/salesforce/ctrl/master/ctrl-merges.txt'},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'ctrl': 256,
}
CONTROL_CODES = {
'Pregnancy': 16_8629,
'Christianity': 7675,
'Explain': 10_6423,
'Fitness': 6_3440,
'Saving': 6_3163,
'Ask': 2_7171,
'Ass': 9_5985,
'Joke': 16_3509,
'Questions': 4_5622,
'Thoughts': 4_9605,
'Retail': 5_2342,
'Feminism': 16_4338,
'Writing': 1_1992,
'Atheism': 19_2263,
'Netflix': 4_8616,
'Computing': 3_9639,
'Opinion': 4_3213,
'Alone': 4_4967,
'Funny': 5_8917,
'Gaming': 4_0358,
'Human': 4088,
'India': 1331,
'Joker': 7_7138,
'Diet': 3_6206,
'Legal': 1_1859,
'Norman': 4939,
'Tip': 7_2689,
'Weight': 5_2343,
'Movies': 4_6273,
'Running': 2_3425,
'Science': 2090,
'Horror': 3_7793,
'Confession': 6_0572,
'Finance': 1_2250,
'Politics': 1_6360,
'Scary': 19_1985,
'Support': 1_2654,
'Technologies': 3_2516,
'Teenage': 6_6160,
'Event': 3_2769,
'Learned': 6_7460,
'Notion': 18_2770,
'Wikipedia': 3_7583,
'Books': 6665,
'Extract': 7_6050,
'Confessions': 10_2701,
'Conspiracy': 7_5932,
'Links': 6_3674,
'Narcissus': 15_0425,
'Relationship': 5_4766,
'Relationships': 13_4796,
'Reviews': 4_1671,
'News': 4256,
'Translation': 2_6820,
'multilingual': 12_8406,
}
def get_pairs(word):
    """Return set of symbol pairs in a word. Word is represented as tuple of symbols (symbols being variable-length strings)."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    pairs = set(pairs)
    return pairs
class CTRLTokenizer(PreTrainedTokenizer):
    """Construct a CTRL tokenizer, based on Byte-Pair-Encoding."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    control_codes = CONTROL_CODES

    def __init__(self, vocab_file, merges_file, unk_token="<unk>", **kwargs):
        super().__init__(unk_token=unk_token, **kwargs)
        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            merges = merges_handle.read().split("\n")[1:-1]
        merges = [tuple(merge.split()) for merge in merges]
        self.bpe_ranks = dict(zip(merges, range(len(merges))))
        self.cache = {}
    @property
    def vocab_size(self):
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)
    def bpe(self, token):
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        word = tuple(list(word[:-1]) + [word[-1] + "</w>"])
        pairs = get_pairs(word)
        if not pairs:
            return token
        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j
                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = "@@ ".join(word)
        word = word[:-4]
        self.cache[token] = word
        return word
    def _tokenize(self, text):
        """Tokenize a string."""
        split_tokens = []
        words = re.findall(r"\S+\n?", text)
        for token in words:
            split_tokens.extend(list(self.bpe(token).split(" ")))
        return split_tokens

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        return self.decoder.get(index, self.unk_token)

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (string) to a single string."""
        out_string = " ".join(tokens).replace("@@ ", "").strip()
        return out_string
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )
        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")
        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1
        return vocab_file, merge_file
# def decode(self, token_ids, skip_special_tokens=False, clean_up_tokenization_spaces=True):
# filtered_tokens = ' '.join(self.convert_ids_to_tokens(token_ids, skip_special_tokens=skip_special_tokens))
# tokens_generated_so_far = re.sub('(@@ )', '', string=filtered_tokens)
# tokens_generated_so_far = re.sub('(@@ ?$)', '', string=tokens_generated_so_far)
# return ''.join(tokens_generated_so_far)
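# A small usage sketch (the vocab/merges paths are hypothetical; any CTRL-style BPE files work):
#
#   tokenizer = CTRLTokenizer(vocab_file="vocab.json", merges_file="merges.txt")
#   ids = tokenizer.encode("Opinion My dog is cute")
#   print(tokenizer.convert_ids_to_tokens(ids))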
| 337 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'configuration_blenderbot_small': [
'BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP',
'BlenderbotSmallConfig',
'BlenderbotSmallOnnxConfig',
],
'tokenization_blenderbot_small': ['BlenderbotSmallTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_blenderbot_small_fast"] = ["BlenderbotSmallTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_blenderbot_small"] = [
'BLENDERBOT_SMALL_PRETRAINED_MODEL_ARCHIVE_LIST',
'BlenderbotSmallForCausalLM',
'BlenderbotSmallForConditionalGeneration',
'BlenderbotSmallModel',
'BlenderbotSmallPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_blenderbot_small"] = [
'TFBlenderbotSmallForConditionalGeneration',
'TFBlenderbotSmallModel',
'TFBlenderbotSmallPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_blenderbot_small"] = [
'FlaxBlenderbotSmallForConditionalGeneration',
'FlaxBlenderbotSmallModel',
'FlaxBlenderbotSmallPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_blenderbot_small import (
BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP,
BlenderbotSmallConfig,
BlenderbotSmallOnnxConfig,
)
from .tokenization_blenderbot_small import BlenderbotSmallTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_blenderbot_small_fast import BlenderbotSmallTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blenderbot_small import (
BLENDERBOT_SMALL_PRETRAINED_MODEL_ARCHIVE_LIST,
BlenderbotSmallForCausalLM,
BlenderbotSmallForConditionalGeneration,
BlenderbotSmallModel,
BlenderbotSmallPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blenderbot_small import (
TFBlenderbotSmallForConditionalGeneration,
TFBlenderbotSmallModel,
TFBlenderbotSmallPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_blenderbot_small import (
FlaxBlenderbotSmallForConditionalGeneration,
FlaxBlenderbotSmallModel,
FlaxBlenderbotSmallPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
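# Sketch of the intended effect: with the lazy module in place, a statement such as
#   from transformers.models.blenderbot_small import BlenderbotSmallConfig
# resolves the attribute through _LazyModule and only imports the heavy submodule on first access.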
| 207 |
from typing import Optional
import pyspark
from .. import Features, NamedSplit
from ..download import DownloadMode
from ..packaged_modules.spark.spark import Spark
from .abc import AbstractDatasetReader
class SparkDatasetReader(AbstractDatasetReader):
    """A dataset reader that reads from a Spark DataFrame."""

    def __init__(
        self,
        df: pyspark.sql.DataFrame,
        split: Optional[NamedSplit] = None,
        features: Optional[Features] = None,
        streaming: bool = True,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        working_dir: str = None,
        load_from_cache_file: bool = True,
        file_format: str = "arrow",
        **kwargs,
    ):
        super().__init__(
            split=split,
            features=features,
            cache_dir=cache_dir,
            keep_in_memory=keep_in_memory,
            streaming=streaming,
            **kwargs,
        )
        self._load_from_cache_file = load_from_cache_file
        self._file_format = file_format
        self.builder = Spark(
            df=df,
            features=features,
            cache_dir=cache_dir,
            working_dir=working_dir,
            **kwargs,
        )

    def read(self):
        if self.streaming:
            return self.builder.as_streaming_dataset(split=self.split)
        download_mode = None if self._load_from_cache_file else DownloadMode.FORCE_REDOWNLOAD
        self.builder.download_and_prepare(
            download_mode=download_mode,
            file_format=self._file_format,
        )
        return self.builder.as_dataset(split=self.split)
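# A usage sketch (assumes an active SparkSession bound to `spark`):
#
#   df = spark.createDataFrame([{"text": "hello"}, {"text": "world"}])
#   ds = SparkDatasetReader(df, streaming=False).read()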
| 207 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
"configuration_bridgetower": [
"BRIDGETOWER_PRETRAINED_CONFIG_ARCHIVE_MAP",
"BridgeTowerConfig",
"BridgeTowerTextConfig",
"BridgeTowerVisionConfig",
],
"processing_bridgetower": ["BridgeTowerProcessor"],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase : Tuple = ["BridgeTowerImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_bridgetower"] = [
"BRIDGETOWER_PRETRAINED_MODEL_ARCHIVE_LIST",
"BridgeTowerForContrastiveLearning",
"BridgeTowerForImageAndTextRetrieval",
"BridgeTowerForMaskedLM",
"BridgeTowerModel",
"BridgeTowerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_bridgetower import (
BRIDGETOWER_PRETRAINED_CONFIG_ARCHIVE_MAP,
BridgeTowerConfig,
BridgeTowerTextConfig,
BridgeTowerVisionConfig,
)
from .processing_bridgetower import BridgeTowerProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_bridgetower import BridgeTowerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bridgetower import (
BRIDGETOWER_PRETRAINED_MODEL_ARCHIVE_LIST,
BridgeTowerForContrastiveLearning,
BridgeTowerForImageAndTextRetrieval,
BridgeTowerForMaskedLM,
BridgeTowerModel,
BridgeTowerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 368 |
from ..utils import DummyObject, requires_backends
class TFGPT2Tokenizer(metaclass=DummyObject):
    _backends = ["keras_nlp"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["keras_nlp"])
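# Sketch of the intended behavior: instantiating the placeholder without `keras_nlp`
# installed raises an ImportError with an installation hint instead of an opaque
# ModuleNotFoundError deep inside the real implementation:
#
#   TFGPT2Tokenizer()  # -> ImportError: ... requires the keras_nlp library ...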
| 208 | 0 |
import os

from huggingface_hub.constants import HUGGINGFACE_HUB_CACHE, hf_cache_home

default_cache_path = HUGGINGFACE_HUB_CACHE

CONFIG_NAME = "config.json"
WEIGHTS_NAME = "diffusion_pytorch_model.bin"
FLAX_WEIGHTS_NAME = "diffusion_flax_model.msgpack"
ONNX_WEIGHTS_NAME = "model.onnx"
SAFETENSORS_WEIGHTS_NAME = "diffusion_pytorch_model.safetensors"
ONNX_EXTERNAL_WEIGHTS_NAME = "weights.pb"
HUGGINGFACE_CO_RESOLVE_ENDPOINT = "https://huggingface.co"
DIFFUSERS_CACHE = default_cache_path
DIFFUSERS_DYNAMIC_MODULE_NAME = "diffusers_modules"
HF_MODULES_CACHE = os.getenv("HF_MODULES_CACHE", os.path.join(hf_cache_home, "modules"))
DEPRECATED_REVISION_ARGS = ["fp16", "non-ema"]
TEXT_ENCODER_ATTN_MODULE = ".self_attn"
| 145 |
from typing import Callable, List, Optional, Union
import PIL
import torch
from transformers import (
CLIPImageProcessor,
CLIPSegForImageSegmentation,
CLIPSegProcessor,
CLIPTextModel,
CLIPTokenizer,
)
from diffusers import DiffusionPipeline
from diffusers.configuration_utils import FrozenDict
from diffusers.models import AutoencoderKL, UNet2DConditionModel
from diffusers.pipelines.stable_diffusion import StableDiffusionInpaintPipeline
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
from diffusers.utils import deprecate, is_accelerate_available, logging
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
class TextInpainting(DiffusionPipeline):
    def __init__(
        self,
        segmentation_model: CLIPSegForImageSegmentation,
        segmentation_processor: CLIPSegProcessor,
        vae: AutoencoderKL,
        text_encoder: CLIPTextModel,
        tokenizer: CLIPTokenizer,
        unet: UNet2DConditionModel,
        scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler],
        safety_checker: StableDiffusionSafetyChecker,
        feature_extractor: CLIPImageProcessor,
    ):
        super().__init__()
        if hasattr(scheduler.config, "steps_offset") and scheduler.config.steps_offset != 1:
            deprecation_message = (
                f"The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`"
                f" should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure "
                "to update the config accordingly as leaving `steps_offset` might led to incorrect results"
                " in future versions. If you have downloaded this checkpoint from the Hugging Face Hub,"
                " it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`"
                " file"
            )
            deprecate("steps_offset!=1", "1.0.0", deprecation_message, standard_warn=False)
            new_config = dict(scheduler.config)
            new_config["steps_offset"] = 1
            scheduler._internal_dict = FrozenDict(new_config)
        if hasattr(scheduler.config, "skip_prk_steps") and scheduler.config.skip_prk_steps is False:
            deprecation_message = (
                f"The configuration file of this scheduler: {scheduler} has not set the configuration"
                " `skip_prk_steps`. `skip_prk_steps` should be set to True in the configuration file. Please make"
                " sure to update the config accordingly as not setting `skip_prk_steps` in the config might lead to"
                " incorrect results in future versions. If you have downloaded this checkpoint from the Hugging Face"
                " Hub, it would be very nice if you could open a Pull request for the"
                " `scheduler/scheduler_config.json` file"
            )
            deprecate("skip_prk_steps not set", "1.0.0", deprecation_message, standard_warn=False)
            new_config = dict(scheduler.config)
            new_config["skip_prk_steps"] = True
            scheduler._internal_dict = FrozenDict(new_config)
        if safety_checker is None:
            logger.warning(
                f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"
                " that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered"
                " results in services or applications open to the public. Both the diffusers team and Hugging Face"
                " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"
                " it only for use-cases that involve analyzing network behavior or auditing its results. For more"
                " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ."
            )
        self.register_modules(
            segmentation_model=segmentation_model,
            segmentation_processor=segmentation_processor,
            vae=vae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            unet=unet,
            scheduler=scheduler,
            safety_checker=safety_checker,
            feature_extractor=feature_extractor,
        )

    def enable_attention_slicing(self, slice_size: Optional[Union[str, int]] = "auto"):
        if slice_size == "auto":
            # half the attention head size is usually a good trade-off between
            # speed and memory
            slice_size = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(slice_size)

    def disable_attention_slicing(self):
        self.enable_attention_slicing(None)

    def enable_sequential_cpu_offload(self):
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("Please install accelerate via `pip install accelerate`")
        device = torch.device("cuda")
        for cpu_offloaded_model in [self.unet, self.text_encoder, self.vae, self.safety_checker]:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model, device)

    @property
    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def _execution_device(self):
        if self.device != torch.device("meta") or not hasattr(self.unet, "_hf_hook"):
            return self.device
        for module in self.unet.modules():
            if (
                hasattr(module, "_hf_hook")
                and hasattr(module._hf_hook, "execution_device")
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device)
        return self.device

    @torch.no_grad()
    def __call__(
        self,
        prompt: Union[str, List[str]],
        image: Union[torch.FloatTensor, PIL.Image.Image],
        text: str,
        height: int = 512,
        width: int = 512,
        num_inference_steps: int = 50,
        guidance_scale: float = 7.5,
        negative_prompt: Optional[Union[str, List[str]]] = None,
        num_images_per_prompt: Optional[int] = 1,
        eta: float = 0.0,
        generator: Optional[torch.Generator] = None,
        latents: Optional[torch.FloatTensor] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
        callback_steps: int = 1,
        **kwargs,
    ):
        # We use the input text to generate the segmentation mask
        inputs = self.segmentation_processor(
            text=[text], images=[image], padding="max_length", return_tensors="pt"
        ).to(self.device)
        outputs = self.segmentation_model(**inputs)
        mask = torch.sigmoid(outputs.logits).cpu().detach().unsqueeze(-1).numpy()
        mask_pil = self.numpy_to_pil(mask)[0].resize(image.size)
        # Run inpainting pipeline with the generated mask
        inpainting_pipeline = StableDiffusionInpaintPipeline(
            vae=self.vae,
            text_encoder=self.text_encoder,
            tokenizer=self.tokenizer,
            unet=self.unet,
            scheduler=self.scheduler,
            safety_checker=self.safety_checker,
            feature_extractor=self.feature_extractor,
        )
        return inpainting_pipeline(
            prompt=prompt,
            image=image,
            mask_image=mask_pil,
            height=height,
            width=width,
            num_inference_steps=num_inference_steps,
            guidance_scale=guidance_scale,
            negative_prompt=negative_prompt,
            num_images_per_prompt=num_images_per_prompt,
            eta=eta,
            generator=generator,
            latents=latents,
            output_type=output_type,
            return_dict=return_dict,
            callback=callback,
            callback_steps=callback_steps,
        )
| 145 | 1 |
from __future__ import annotations
from scipy.special import comb # type: ignore
class BezierCurve:
    """Bezier curve is a weighted sum of a set of control points."""

    def __init__(self, list_of_points: list[tuple[float, float]]):
        self.list_of_points = list_of_points
        # Degree determines the flexibility of the curve.
        # Degree = 1 will produce a straight line.
        self.degree = len(list_of_points) - 1

    def basis_function(self, t: float) -> list[float]:
        assert 0 <= t <= 1, "Time t must be between 0 and 1."
        output_values: list[float] = []
        for i in range(len(self.list_of_points)):
            # basis function for each i
            output_values.append(
                comb(self.degree, i) * ((1 - t) ** (self.degree - i)) * (t**i)
            )
        # the basis must sum up to 1 for it to produce a valid Bezier curve.
        assert round(sum(output_values), 5) == 1
        return output_values

    def bezier_curve_function(self, t: float) -> tuple[float, float]:
        assert 0 <= t <= 1, "Time t must be between 0 and 1."
        basis_function = self.basis_function(t)
        x = 0.0
        y = 0.0
        for i in range(len(self.list_of_points)):
            # For all points, sum up the product of i-th basis function and i-th point.
            x += basis_function[i] * self.list_of_points[i][0]
            y += basis_function[i] * self.list_of_points[i][1]
        return (x, y)

    def plot_curve(self, step_size: float = 0.01):
        from matplotlib import pyplot as plt  # type: ignore

        to_plot_x: list[float] = []  # x coordinates of points to plot
        to_plot_y: list[float] = []  # y coordinates of points to plot
        t = 0.0
        while t <= 1:
            value = self.bezier_curve_function(t)
            to_plot_x.append(value[0])
            to_plot_y.append(value[1])
            t += step_size
        x = [i[0] for i in self.list_of_points]
        y = [i[1] for i in self.list_of_points]
        plt.plot(
            to_plot_x,
            to_plot_y,
            color="blue",
            label="Curve of Degree " + str(self.degree),
        )
        plt.scatter(x, y, color="red", label="Control Points")
        plt.legend()
        plt.show()
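# For reference, basis_function implements the Bernstein polynomials
#   B_{i,n}(t) = C(n, i) * (1 - t)**(n - i) * t**i,  i = 0, ..., n  (n = self.degree),
# which sum to 1 for every t, and bezier_curve_function evaluates the curve
#   C(t) = sum_i B_{i,n}(t) * P_i  over the control points P_i.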
if __name__ == "__main__":
import doctest
doctest.testmod()
BezierCurve([(1, 2), (3, 5)]).plot_curve() # degree 1
BezierCurve([(0, 0), (5, 5), (5, 0)]).plot_curve() # degree 2
BezierCurve([(0, 0), (5, 5), (5, 0), (2.5, -2.5)]).plot_curve() # degree 3
| 357 |
from collections import Counter
from timeit import timeit
def can_string_be_rearranged_as_palindrome_counter(input_str: str = "") -> bool:
    """Determine, via collections.Counter, whether the string can be rearranged into a palindrome."""
    return sum(c % 2 for c in Counter(input_str.replace(" ", "").lower()).values()) < 2


def can_string_be_rearranged_as_palindrome(input_str: str = "") -> bool:
    """Determine, by counting character frequencies, whether the string can be rearranged into a palindrome."""
    if len(input_str) == 0:
        return True
    lower_case_input_str = input_str.replace(" ", "").lower()
    # character_freq_dict: Stores the frequency of every character in the input string
    character_freq_dict: dict[str, int] = {}
    for character in lower_case_input_str:
        character_freq_dict[character] = character_freq_dict.get(character, 0) + 1
    odd_char = 0
    for character_count in character_freq_dict.values():
        if character_count % 2:
            odd_char += 1
    if odd_char > 1:
        return False
    return True


def benchmark(input_str: str = "") -> None:
    """Benchmark the two implementations against each other on `input_str`."""
    print("\nFor string = ", input_str, ":")
    print(
        "> can_string_be_rearranged_as_palindrome_counter()",
        "\tans =",
        can_string_be_rearranged_as_palindrome_counter(input_str),
        "\ttime =",
        timeit(
            "z.can_string_be_rearranged_as_palindrome_counter(z.check_str)",
            setup="import __main__ as z",
        ),
        "seconds",
    )
    print(
        "> can_string_be_rearranged_as_palindrome()",
        "\tans =",
        can_string_be_rearranged_as_palindrome(input_str),
        "\ttime =",
        timeit(
            "z.can_string_be_rearranged_as_palindrome(z.check_str)",
            setup="import __main__ as z",
        ),
        "seconds",
    )


if __name__ == "__main__":
    check_str = input(
        "Enter string to determine if it can be rearranged as a palindrome or not: "
    ).strip()
    benchmark(check_str)
    status = can_string_be_rearranged_as_palindrome_counter(check_str)
    print(f"{check_str} can {'' if status else 'not '}be rearranged as a palindrome")
| 121 | 0 |