"""Tests for the TensorFlow DPR model."""
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy
import tensorflow as tf
from transformers import (
TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
BertConfig,
DPRConfig,
TFDPRContextEncoder,
TFDPRQuestionEncoder,
TFDPRReader,
)
class TFDPRModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
        projection_dim=0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.projection_dim = projection_dim
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            # follow test_modeling_tf_ctrl.py
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = BertConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )
        config = DPRConfig(projection_dim=self.projection_dim, **config.to_dict())

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def create_and_check_dpr_context_encoder(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFDPRContextEncoder(config=config)
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.projection_dim or self.hidden_size))

    def create_and_check_dpr_question_encoder(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFDPRQuestionEncoder(config=config)
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.projection_dim or self.hidden_size))

    def create_and_check_dpr_reader(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFDPRReader(config=config)
        result = model(input_ids, attention_mask=input_mask)

        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.relevance_logits.shape, (self.batch_size,))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids}
        return config, inputs_dict
@require_tf
class TFDPRModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFDPRContextEncoder,
            TFDPRQuestionEncoder,
            TFDPRReader,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = {"feature-extraction": TFDPRQuestionEncoder} if is_tf_available() else {}

    test_resize_embeddings = False
    test_missing_keys = False
    test_pruning = False
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFDPRModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DPRConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_dpr_context_encoder_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_dpr_context_encoder(*config_and_inputs)

    def test_dpr_question_encoder_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_dpr_question_encoder(*config_and_inputs)

    def test_dpr_reader_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_dpr_reader(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFDPRContextEncoder.from_pretrained(model_name)
            self.assertIsNotNone(model)

        for model_name in TF_DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFDPRQuestionEncoder.from_pretrained(model_name)
            self.assertIsNotNone(model)

        for model_name in TF_DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFDPRReader.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_tf
class TFDPRModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head(self):
        model = TFDPRQuestionEncoder.from_pretrained("facebook/dpr-question_encoder-single-nq-base")

        input_ids = tf.constant(
            [[101, 7592, 1010, 2003, 2026, 3899, 10140, 1029, 102]]
        )  # [CLS] hello, is my dog cute? [SEP]
        output = model(input_ids)[0]  # embedding shape = (1, 768)
        # compare the actual values for a slice.
        expected_slice = tf.constant(
            [
                [
                    0.03236253,
                    0.12753335,
                    0.16818509,
                    0.00279786,
                    0.3896933,
                    0.24264945,
                    0.2178971,
                    -0.02335227,
                    -0.08481959,
                    -0.14324117,
                ]
            ]
        )
        self.assertTrue(numpy.allclose(output[:, :10].numpy(), expected_slice.numpy(), atol=1e-4))
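# Usage note (illustrative, not part of the original file): the relative imports above
# assume this module lives under tests/models/dpr/ in a `transformers` checkout, where
# it would typically be run with pytest, e.g.
#
#   python -m pytest tests/models/dpr/test_modeling_tf_dpr.py -k "context_encoder"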
"""Adaptive softmax output layer for the TensorFlow Transformer-XL model."""
import tensorflow as tf
from ...tf_utils import shape_list
class TFAdaptiveSoftmaxMask(tf.keras.layers.Layer):
    def __init__(self, vocab_size, d_embed, d_proj, cutoffs, div_val=1, keep_order=False, **kwargs):
        super().__init__(**kwargs)

        self.vocab_size = vocab_size
        self.d_embed = d_embed
        self.d_proj = d_proj

        self.cutoffs = cutoffs + [vocab_size]
        self.cutoff_ends = [0] + self.cutoffs
        self.div_val = div_val

        self.shortlist_size = self.cutoffs[0]
        self.n_clusters = len(self.cutoffs) - 1
        self.head_size = self.shortlist_size + self.n_clusters
        self.keep_order = keep_order

        self.out_layers = []
        self.out_projs = []

    def build(self, input_shape):
        if self.n_clusters > 0:
            self.cluster_weight = self.add_weight(
                shape=(self.n_clusters, self.d_embed), initializer="zeros", trainable=True, name="cluster_weight"
            )
            self.cluster_bias = self.add_weight(
                shape=(self.n_clusters,), initializer="zeros", trainable=True, name="cluster_bias"
            )

        if self.div_val == 1:
            for i in range(len(self.cutoffs)):
                if self.d_proj != self.d_embed:
                    weight = self.add_weight(
                        shape=(self.d_embed, self.d_proj),
                        initializer="zeros",
                        trainable=True,
                        name=f"out_projs_._{i}",
                    )
                    self.out_projs.append(weight)
                else:
                    self.out_projs.append(None)
                weight = self.add_weight(
                    shape=(self.vocab_size, self.d_embed),
                    initializer="zeros",
                    trainable=True,
                    name=f"out_layers_._{i}_._weight",
                )
                bias = self.add_weight(
                    shape=(self.vocab_size,),
                    initializer="zeros",
                    trainable=True,
                    name=f"out_layers_._{i}_._bias",
                )
                self.out_layers.append((weight, bias))
        else:
            for i in range(len(self.cutoffs)):
                l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                d_emb_i = self.d_embed // (self.div_val**i)
                proj = self.add_weight(
                    shape=(d_emb_i, self.d_proj), initializer="zeros", trainable=True, name=f"out_projs_._{i}"
                )
                self.out_projs.append(proj)
                weight = self.add_weight(
                    shape=(r_idx - l_idx, d_emb_i),
                    initializer="zeros",
                    trainable=True,
                    name=f"out_layers_._{i}_._weight",
                )
                bias = self.add_weight(
                    shape=(r_idx - l_idx,),
                    initializer="zeros",
                    trainable=True,
                    name=f"out_layers_._{i}_._bias",
                )
                self.out_layers.append((weight, bias))
        super().build(input_shape)
    @staticmethod
    def _logit(x, W, b, proj=None):
        y = x
        if proj is not None:
            y = tf.einsum("ibd,ed->ibe", y, proj)
        return tf.einsum("ibd,nd->ibn", y, W) + b

    @staticmethod
    def _gather_logprob(logprob, target):
        lp_size = shape_list(logprob)
        r = tf.range(lp_size[0], dtype=target.dtype)
        idx = tf.stack([r, target], 1)
        return tf.gather_nd(logprob, idx)

    def call(self, hidden, target, return_mean=True, training=False):
        head_logprob = 0
        if self.n_clusters == 0:
            output = self._logit(hidden, self.out_layers[0][0], self.out_layers[0][1], self.out_projs[0])
            if target is not None:
                loss = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=target, logits=output)
            out = tf.nn.log_softmax(output, axis=-1)
        else:
            hidden_sizes = shape_list(hidden)
            out = []
            loss = tf.zeros(hidden_sizes[:2])
            for i in range(len(self.cutoffs)):
                l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                if target is not None:
                    mask = (target >= l_idx) & (target < r_idx)
                    mask_idx = tf.where(mask)
                    cur_target = tf.boolean_mask(target, mask) - l_idx

                if self.div_val == 1:
                    cur_W = self.out_layers[0][0][l_idx:r_idx]
                    cur_b = self.out_layers[0][1][l_idx:r_idx]
                else:
                    cur_W = self.out_layers[i][0]
                    cur_b = self.out_layers[i][1]

                if i == 0:
                    cur_W = tf.concat([cur_W, self.cluster_weight], 0)
                    cur_b = tf.concat([cur_b, self.cluster_bias], 0)

                    head_logit = self._logit(hidden, cur_W, cur_b, self.out_projs[0])
                    head_logprob = tf.nn.log_softmax(head_logit)
                    out.append(head_logprob[..., : self.cutoffs[0]])
                    if target is not None:
                        cur_head_logprob = tf.boolean_mask(head_logprob, mask)
                        cur_logprob = self._gather_logprob(cur_head_logprob, cur_target)
                else:
                    tail_logit = self._logit(hidden, cur_W, cur_b, self.out_projs[i])
                    tail_logprob = tf.nn.log_softmax(tail_logit)

                    cluster_prob_idx = self.cutoffs[0] + i - 1  # No probability for the head cluster
                    logprob_i = head_logprob[..., cluster_prob_idx, None] + tail_logprob
                    out.append(logprob_i)
                    if target is not None:
                        cur_head_logprob = tf.boolean_mask(head_logprob, mask)
                        cur_tail_logprob = tf.boolean_mask(tail_logprob, mask)
                        cur_logprob = self._gather_logprob(cur_tail_logprob, cur_target)
                        cur_logprob += cur_head_logprob[:, self.cutoff_ends[1] + i - 1]
                if target is not None:
                    loss += tf.scatter_nd(mask_idx, -cur_logprob, shape_list(loss))
            out = tf.concat(out, axis=-1)

        if target is not None:
            if return_mean:
                loss = tf.reduce_mean(loss)
            # Add the training-time loss value to the layer using `self.add_loss()`.
            self.add_loss(loss)

            # Log the loss as a metric (we could log arbitrary metrics,
            # including different metrics for training and inference).
            self.add_metric(loss, name=self.name, aggregation="mean" if return_mean else "")

        return out
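# Illustrative usage sketch (not part of the original module; shown as comments because
# the relative import above prevents running this file directly). It builds the layer
# with a 100-token vocabulary split into a 50-token head and one tail cluster, then
# computes log-probabilities for random hidden states; all shapes here are made up.
#
#   layer = TFAdaptiveSoftmaxMask(vocab_size=100, d_embed=16, d_proj=16, cutoffs=[50])
#   hidden = tf.random.normal((8, 4, 16))  # (seq_len, batch, d_proj)
#   target = tf.random.uniform((8, 4), maxval=100, dtype=tf.int64)
#   logprob = layer(hidden, target, return_mean=True)  # shape (8, 4, 100)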
"""Bi-directional Dijkstra shortest-path search."""
from queue import PriorityQueue
from typing import Any
import numpy as np
def pass_and_relaxation(
    graph: dict,
    v: str,
    visited_forward: set,
    visited_backward: set,
    cst_fwd: dict,
    cst_bwd: dict,
    queue: PriorityQueue,
    parent: dict,
    shortest_distance: float | int,
) -> float | int:
    for nxt, d in graph[v]:
        if nxt in visited_forward:
            continue
        old_cost_f = cst_fwd.get(nxt, np.inf)
        new_cost_f = cst_fwd[v] + d
        if new_cost_f < old_cost_f:
            queue.put((new_cost_f, nxt))
            cst_fwd[nxt] = new_cost_f
            parent[nxt] = v
        if nxt in visited_backward:
            if cst_fwd[v] + d + cst_bwd[nxt] < shortest_distance:
                shortest_distance = cst_fwd[v] + d + cst_bwd[nxt]
    return shortest_distance


def bidirectional_dij(
    source: str, destination: str, graph_forward: dict, graph_backward: dict
) -> int:
    """
    Bi-directional Dijkstra's algorithm.
    Returns the length of the shortest path, or -1 if the destination
    is not reachable.
    """
    shortest_path_distance = -1

    visited_forward = set()
    visited_backward = set()
    cst_fwd = {source: 0}
    cst_bwd = {destination: 0}
    parent_forward = {source: None}
    parent_backward = {destination: None}
    queue_forward: PriorityQueue[Any] = PriorityQueue()
    queue_backward: PriorityQueue[Any] = PriorityQueue()

    shortest_distance = np.inf

    queue_forward.put((0, source))
    queue_backward.put((0, destination))

    if source == destination:
        return 0

    while not queue_forward.empty() and not queue_backward.empty():
        _, v_fwd = queue_forward.get()
        visited_forward.add(v_fwd)

        _, v_bwd = queue_backward.get()
        visited_backward.add(v_bwd)

        shortest_distance = pass_and_relaxation(
            graph_forward,
            v_fwd,
            visited_forward,
            visited_backward,
            cst_fwd,
            cst_bwd,
            queue_forward,
            parent_forward,
            shortest_distance,
        )

        shortest_distance = pass_and_relaxation(
            graph_backward,
            v_bwd,
            visited_backward,
            visited_forward,
            cst_bwd,
            cst_fwd,
            queue_backward,
            parent_backward,
            shortest_distance,
        )

        if cst_fwd[v_fwd] + cst_bwd[v_bwd] >= shortest_distance:
            break

    if shortest_distance != np.inf:
        shortest_path_distance = shortest_distance
    return shortest_path_distance


graph_fwd = {
    "B": [["C", 1]],
    "C": [["D", 1]],
    "D": [["F", 1]],
    "E": [["B", 1], ["G", 2]],
    "F": [],
    "G": [["F", 1]],
}
graph_bwd = {
    "B": [["E", 1]],
    "C": [["B", 1]],
    "D": [["C", 1]],
    "F": [["D", 1], ["G", 1]],
    "E": [[None, np.inf]],
    "G": [["E", 2]],
}
if __name__ == "__main__":
import doctest
doctest.testmod()
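# Illustrative check (not part of the original file): with the graphs above, the
# shortest path from "E" to "F" is E -> G -> F with total cost 3.
if __name__ == "__main__":
    assert bidirectional_dij("E", "F", graph_fwd, graph_bwd) == 3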
"""Helper functions for quantization-aware training with NVIDIA's pytorch-quantization toolkit."""
import logging
import re
import pytorch_quantization
import pytorch_quantization.nn as quant_nn
import torch
from pytorch_quantization import calib
from pytorch_quantization.tensor_quant import QuantDescriptor
logger = logging.getLogger(__name__)

name_width = 50  # max width of layer names
qname_width = 70  # max width of quantizer names
def add_arguments(parser):
    """Add arguments to parser for functions defined in quant_trainer."""

    group = parser.add_argument_group("quant_trainer arguments")
    group.add_argument("--wprec", type=int, default=8, help="weight precision")
    group.add_argument("--aprec", type=int, default=8, help="activation precision")
    group.add_argument("--quant-per-tensor", action="store_true", help="per tensor weight scaling")
    group.add_argument("--quant-disable", action="store_true", help="disable all quantizers")
    group.add_argument("--quant-disable-embeddings", action="store_true", help="disable all embeddings quantizers")
    group.add_argument("--quant-disable-keyword", type=str, nargs="+", help="disable quantizers by keyword")
    group.add_argument("--quant-disable-layer-module", type=str, help="disable quantizers by keyword under layer.")
    group.add_argument("--quant-enable-layer-module", type=str, help="enable quantizers by keyword under layer")
    group.add_argument("--calibrator", default="max", help="which quantization range calibrator to use")
    group.add_argument("--percentile", default=None, type=float, help="percentile for PercentileCalibrator")
    group.add_argument("--fuse-qkv", action="store_true", help="use the same scale factor for qkv")
    group.add_argument("--clip-gelu", metavar="N", type=float, help="clip gelu output maximum value to N")
    group.add_argument(
        "--recalibrate-weights",
        action="store_true",
        help=(
            "recalibrate weight amaxes by taking the max of the weights."
            " amaxes will be computed with the current quantization granularity (axis)."
        ),
    )


def set_default_quantizers(args):
    """Set default quantizers before creating the model."""

    if args.calibrator == "max":
        calib_method = "max"
    elif args.calibrator == "percentile":
        if args.percentile is None:
            raise ValueError("Specify --percentile when using percentile calibrator")
        calib_method = "histogram"
    elif args.calibrator == "mse":
        calib_method = "histogram"
    else:
        raise ValueError(f"Invalid calibrator {args.calibrator}")

    input_desc = QuantDescriptor(num_bits=args.aprec, calib_method=calib_method)
    weight_desc = QuantDescriptor(num_bits=args.wprec, axis=(None if args.quant_per_tensor else (0,)))
    quant_nn.QuantLinear.set_default_quant_desc_input(input_desc)
    quant_nn.QuantLinear.set_default_quant_desc_weight(weight_desc)


def configure_model(model, args, calib=False, eval=False):
    """Function called before the training loop."""

    logger.info("Configuring Model for Quantization")
    logger.info(f"using quantization package {pytorch_quantization.__file__}")

    if not calib:
        if args.quant_disable_embeddings:
            set_quantizer_by_name(model, ["embeddings"], which="weight", _disabled=True)

        if args.quant_disable:
            set_quantizer_by_name(model, [""], _disabled=True)

        if args.quant_disable_keyword:
            set_quantizer_by_name(model, args.quant_disable_keyword, _disabled=True)

        if args.quant_disable_layer_module:
            set_quantizer_by_name(model, [r"layer.\d+." + args.quant_disable_layer_module], _disabled=True)

        if args.quant_enable_layer_module:
            set_quantizer_by_name(model, [r"layer.\d+." + args.quant_enable_layer_module], _disabled=False)

        if args.recalibrate_weights:
            recalibrate_weights(model)

        if args.fuse_qkv:
            fuse_qkv(model, args)

    if args.clip_gelu:
        clip_gelu(model, args.clip_gelu)

    # if args.local_rank in [-1, 0] and not calib:
    print_quant_summary(model)
def enable_calibration(model):
    """Enable calibration of all *_quantizer modules in model."""

    logger.info("Enabling Calibration")
    for name, module in model.named_modules():
        if name.endswith("_quantizer"):
            if module._calibrator is not None:
                module.disable_quant()
                module.enable_calib()
            else:
                module.disable()
            logger.info(f"{name:80}: {module}")


def finish_calibration(model, args):
    """Disable calibration and load amax for all *_quantizer modules in model."""

    logger.info("Loading calibrated amax")
    for name, module in model.named_modules():
        if name.endswith("_quantizer"):
            if module._calibrator is not None:
                if isinstance(module._calibrator, calib.MaxCalibrator):
                    module.load_calib_amax()
                else:
                    module.load_calib_amax("percentile", percentile=args.percentile)
                module.enable_quant()
                module.disable_calib()
            else:
                module.enable()
    model.cuda()
    print_quant_summary(model)


def fuse_qkv(model, args):
    """Use the same scale factor for the query, key, and value projections of each attention block."""

    def fuse3(qq, qk, qv):
        for mod in [qq, qk, qv]:
            if not hasattr(mod, "_amax"):
                print("          WARNING: NO AMAX BUFFER")
                return
        q = qq._amax.detach().item()
        k = qk._amax.detach().item()
        v = qv._amax.detach().item()

        amax = max(q, k, v)
        qq._amax.fill_(amax)
        qk._amax.fill_(amax)
        qv._amax.fill_(amax)
        logger.info(f"          q={q:5.2f} k={k:5.2f} v={v:5.2f} -> {amax:5.2f}")

    for name, mod in model.named_modules():
        if name.endswith(".attention.self"):
            logger.info(f"FUSE_QKV: {name:{name_width}}")
            fuse3(mod.matmul_q_input_quantizer, mod.matmul_k_input_quantizer, mod.matmul_v_input_quantizer)
            if args.quant_per_tensor:
                fuse3(mod.query._weight_quantizer, mod.key._weight_quantizer, mod.value._weight_quantizer)


def clip_gelu(model, maxval):
    """Clip the amax of GELU output quantizers to maxval."""

    for name, mod in model.named_modules():
        if name.endswith(".output.dense") and not name.endswith("attention.output.dense"):
            amax_init = mod._input_quantizer._amax.data.detach().item()
            mod._input_quantizer._amax.data.detach().clamp_(max=maxval)
            amax = mod._input_quantizer._amax.data.detach().item()
            logger.info(f"CLIP_GELU: {name:{name_width}} amax: {amax_init:5.2f} -> {amax:5.2f}")


def expand_amax(model):
    """Expand per-tensor amax to be per channel, where each channel is assigned the per-tensor amax."""

    for name, mod in model.named_modules():
        if hasattr(mod, "_weight_quantizer") and mod._weight_quantizer.axis is not None:
            k = mod.weight.shape[0]
            amax = mod._weight_quantizer._amax.detach()
            mod._weight_quantizer._amax = torch.ones(k, dtype=amax.dtype, device=amax.device) * amax
            print(f"expanding {name} {amax} -> {mod._weight_quantizer._amax}")
def recalibrate_weights(model):
    """Performs max calibration on the weights and updates amax."""

    for name, mod in model.named_modules():
        if hasattr(mod, "_weight_quantizer"):
            if not hasattr(mod._weight_quantizer, "_amax"):
                print(f"RECALIB: {name:{name_width}} WARNING: NO AMAX BUFFER")
                continue

            # determine which axes to reduce across
            # e.g. a 4D tensor quantized per axis 0 should reduce over (1,2,3)
            axis_set = set() if mod._weight_quantizer.axis is None else set(mod._weight_quantizer.axis)
            reduce_axis = set(range(len(mod.weight.size()))) - axis_set
            amax = pytorch_quantization.utils.reduce_amax(mod.weight, axis=reduce_axis, keepdims=True).detach()
            logger.info(f"RECALIB: {name:{name_width}} {mod._weight_quantizer._amax.flatten()} -> {amax.flatten()}")
            mod._weight_quantizer._amax = amax
def print_model_summary(model, name_width=25, line_width=180, ignore=None):
    """Print model quantization configuration."""

    if ignore is None:
        ignore = []
    elif not isinstance(ignore, list):
        ignore = [ignore]

    name_width = 0
    for name, mod in model.named_modules():
        if not hasattr(mod, "weight"):
            continue
        name_width = max(name_width, len(name))

    for name, mod in model.named_modules():
        input_q = getattr(mod, "_input_quantizer", None)
        weight_q = getattr(mod, "_weight_quantizer", None)
        if not hasattr(mod, "weight"):
            continue
        if type(mod) in ignore:
            continue
        if [True for s in ignore if type(s) is str and s in name]:
            continue
        act_str = f"Act:{input_q.extra_repr()}"
        wgt_str = f"Wgt:{weight_q.extra_repr()}"
        s = f"{name:{name_width}} {act_str} {wgt_str}"
        if len(s) <= line_width:
            logger.info(s)
        else:
            logger.info(f"{name:{name_width}} {act_str}")
            logger.info(f'{" ":{name_width}} {wgt_str}')


def print_quant_summary(model):
    """Print summary of all quantizer modules in the model."""

    count = 0
    for name, mod in model.named_modules():
        if isinstance(mod, pytorch_quantization.nn.TensorQuantizer):
            print(f"{name:80} {mod}")
            count += 1
    print(f"{count} TensorQuantizers found in model")


def set_quantizer(name, mod, quantizer, k, v):
    """Set attributes for mod.quantizer."""

    quantizer_mod = getattr(mod, quantizer, None)
    if quantizer_mod is not None:
        assert hasattr(quantizer_mod, k)
        setattr(quantizer_mod, k, v)
    else:
        logger.warning(f"{name} has no {quantizer}")


def set_quantizers(name, mod, which="both", **kwargs):
    """Set quantizer attributes for mod."""

    s = f"Warning: changing {which} quantizers of {name:{qname_width}}"
    for k, v in kwargs.items():
        s += f" {k}={v}"
        if which in ["input", "both"]:
            set_quantizer(name, mod, "_input_quantizer", k, v)
        if which in ["weight", "both"]:
            set_quantizer(name, mod, "_weight_quantizer", k, v)
    logger.info(s)


def set_quantizer_by_name(model, names, **kwargs):
    """Set quantizer attributes for layers where the name contains a substring in names."""

    for name, mod in model.named_modules():
        if hasattr(mod, "_input_quantizer") or hasattr(mod, "_weight_quantizer"):
            for n in names:
                if re.search(n, name):
                    set_quantizers(name, mod, **kwargs)
        elif name.endswith("_quantizer"):
            for n in names:
                if re.search(n, name):
                    s = f"Warning: changing {name:{name_width}}"
                    for k, v in kwargs.items():
                        s += f" {k}={v}"
                        setattr(mod, k, v)
                    logger.info(s)
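# Illustrative wiring (not part of the original module): how a training script would
# typically use these helpers. Requires the `pytorch-quantization` package and a model
# built from quant_nn layers; shown as comments because it needs a GPU environment, and
# `build_model` below is a placeholder, not a real function.
#
#   import argparse
#
#   parser = argparse.ArgumentParser()
#   add_arguments(parser)
#   args = parser.parse_args(["--aprec", "8", "--wprec", "8", "--quant-per-tensor"])
#   set_default_quantizers(args)  # must run before the model is instantiated
#   model = build_model()         # placeholder for a quantizable model
#   configure_model(model, args)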
"""Project Euler Problem 89: https://projecteuler.net/problem=89"""
import os
SYMBOLS = {"I": 1, "V": 5, "X": 10, "L": 50, "C": 100, "D": 500, "M": 1000}
def parse_roman_numerals(numerals: str) -> int:
    """Converts a string of roman numerals to an integer, e.g. "XIX" -> 19."""
    total_value = 0

    index = 0
    while index < len(numerals) - 1:
        current_value = SYMBOLS[numerals[index]]
        next_value = SYMBOLS[numerals[index + 1]]
        if current_value < next_value:
            total_value -= current_value
        else:
            total_value += current_value
        index += 1
    total_value += SYMBOLS[numerals[index]]

    return total_value


def generate_roman_numerals(num: int) -> str:
    """Generates a string of roman numerals for a given integer, e.g. 19 -> "XIX"."""
    numerals = ""

    m_count = num // 1000
    numerals += m_count * "M"
    num %= 1000

    c_count = num // 100
    if c_count == 9:
        numerals += "CM"
        c_count -= 9
    elif c_count == 4:
        numerals += "CD"
        c_count -= 4
    if c_count >= 5:
        numerals += "D"
        c_count -= 5
    numerals += c_count * "C"
    num %= 100

    x_count = num // 10
    if x_count == 9:
        numerals += "XC"
        x_count -= 9
    elif x_count == 4:
        numerals += "XL"
        x_count -= 4
    if x_count >= 5:
        numerals += "L"
        x_count -= 5
    numerals += x_count * "X"
    num %= 10

    if num == 9:
        numerals += "IX"
        num -= 9
    elif num == 4:
        numerals += "IV"
        num -= 4
    if num >= 5:
        numerals += "V"
        num -= 5
    numerals += num * "I"

    return numerals
def UpperCAmelCase__ ( UpperCAmelCase__ = "/p089_roman.txt" ) -> int:
A_ = 0
with open(os.path.dirname(UpperCAmelCase__ ) + roman_numerals_filename ) as filea:
A_ = filea.readlines()
for line in lines:
A_ = line.strip()
A_ = parse_roman_numerals(UpperCAmelCase__ )
A_ = generate_roman_numerals(UpperCAmelCase__ )
savings += len(UpperCAmelCase__ ) - len(UpperCAmelCase__ )
return savings
if __name__ == "__main__":
print(f"""{solution() = }""")
"""Tests for the TensorFlow MobileBERT model."""
from __future__ import annotations
import unittest
from transformers import MobileBertConfig, is_tf_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_MODEL_FOR_PRETRAINING_MAPPING,
TFMobileBertForMaskedLM,
TFMobileBertForMultipleChoice,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertModel,
)
@require_tf
class TFMobileBertModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFMobileBertModel,
            TFMobileBertForMaskedLM,
            TFMobileBertForNextSentencePrediction,
            TFMobileBertForPreTraining,
            TFMobileBertForQuestionAnswering,
            TFMobileBertForSequenceClassification,
            TFMobileBertForTokenClassification,
            TFMobileBertForMultipleChoice,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFMobileBertModel,
            "fill-mask": TFMobileBertForMaskedLM,
            "question-answering": TFMobileBertForQuestionAnswering,
            "text-classification": TFMobileBertForSequenceClassification,
            "token-classification": TFMobileBertForTokenClassification,
            "zero-shot": TFMobileBertForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_head_masking = False
    test_onnx = False

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class in get_values(TF_MODEL_FOR_PRETRAINING_MAPPING):
                inputs_dict["next_sentence_label"] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32)

        return inputs_dict
    class TFMobileBertModelTester(object):
        def __init__(
            self,
            parent,
            batch_size=13,
            seq_length=7,
            is_training=True,
            use_input_mask=True,
            use_token_type_ids=True,
            use_labels=True,
            vocab_size=99,
            hidden_size=32,
            embedding_size=32,
            num_hidden_layers=2,
            num_attention_heads=4,
            intermediate_size=37,
            hidden_act="gelu",
            hidden_dropout_prob=0.1,
            attention_probs_dropout_prob=0.1,
            max_position_embeddings=512,
            type_vocab_size=16,
            type_sequence_label_size=2,
            initializer_range=0.02,
            num_labels=3,
            num_choices=4,
            scope=None,
        ):
            self.parent = parent
            self.batch_size = batch_size
            self.seq_length = seq_length
            self.is_training = is_training
            self.use_input_mask = use_input_mask
            self.use_token_type_ids = use_token_type_ids
            self.use_labels = use_labels
            self.vocab_size = vocab_size
            self.hidden_size = hidden_size
            self.num_hidden_layers = num_hidden_layers
            self.num_attention_heads = num_attention_heads
            self.intermediate_size = intermediate_size
            self.hidden_act = hidden_act
            self.hidden_dropout_prob = hidden_dropout_prob
            self.attention_probs_dropout_prob = attention_probs_dropout_prob
            self.max_position_embeddings = max_position_embeddings
            self.type_vocab_size = type_vocab_size
            self.type_sequence_label_size = type_sequence_label_size
            self.initializer_range = initializer_range
            self.num_labels = num_labels
            self.num_choices = num_choices
            self.scope = scope
            self.embedding_size = embedding_size

        def prepare_config_and_inputs(self):
            input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

            input_mask = None
            if self.use_input_mask:
                input_mask = random_attention_mask([self.batch_size, self.seq_length])

            token_type_ids = None
            if self.use_token_type_ids:
                token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

            sequence_labels = None
            token_labels = None
            choice_labels = None
            if self.use_labels:
                sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
                token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
                choice_labels = ids_tensor([self.batch_size], self.num_choices)

            config = MobileBertConfig(
                vocab_size=self.vocab_size,
                hidden_size=self.hidden_size,
                num_hidden_layers=self.num_hidden_layers,
                num_attention_heads=self.num_attention_heads,
                intermediate_size=self.intermediate_size,
                hidden_act=self.hidden_act,
                hidden_dropout_prob=self.hidden_dropout_prob,
                attention_probs_dropout_prob=self.attention_probs_dropout_prob,
                max_position_embeddings=self.max_position_embeddings,
                type_vocab_size=self.type_vocab_size,
                initializer_range=self.initializer_range,
                embedding_size=self.embedding_size,
            )

            return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
        def create_and_check_mobilebert_model(
            self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
        ):
            model = TFMobileBertModel(config=config)
            inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
            result = model(inputs)

            inputs = [input_ids, input_mask]
            result = model(inputs)

            result = model(input_ids)

            self.parent.assertEqual(
                result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)
            )
            self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

        def create_and_check_mobilebert_for_masked_lm(
            self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
        ):
            model = TFMobileBertForMaskedLM(config=config)
            inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
            result = model(inputs)
            self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

        def create_and_check_mobilebert_for_next_sequence_prediction(
            self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
        ):
            model = TFMobileBertForNextSentencePrediction(config=config)
            inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
            result = model(inputs)
            self.parent.assertEqual(result.logits.shape, (self.batch_size, 2))

        def create_and_check_mobilebert_for_pretraining(
            self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
        ):
            model = TFMobileBertForPreTraining(config=config)
            inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
            result = model(inputs)
            self.parent.assertEqual(
                result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size)
            )
            self.parent.assertEqual(result.seq_relationship_logits.shape, (self.batch_size, 2))

        def create_and_check_mobilebert_for_sequence_classification(
            self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
        ):
            config.num_labels = self.num_labels
            model = TFMobileBertForSequenceClassification(config=config)
            inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
            result = model(inputs)
            self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

        def create_and_check_mobilebert_for_multiple_choice(
            self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
        ):
            config.num_choices = self.num_choices
            model = TFMobileBertForMultipleChoice(config=config)
            multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
            multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
            multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids, 1), (1, self.num_choices, 1))
            inputs = {
                "input_ids": multiple_choice_inputs_ids,
                "attention_mask": multiple_choice_input_mask,
                "token_type_ids": multiple_choice_token_type_ids,
            }
            result = model(inputs)
            self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

        def create_and_check_mobilebert_for_token_classification(
            self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
        ):
            config.num_labels = self.num_labels
            model = TFMobileBertForTokenClassification(config=config)
            inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
            result = model(inputs)
            self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

        def create_and_check_mobilebert_for_question_answering(
            self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
        ):
            model = TFMobileBertForQuestionAnswering(config=config)
            inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
            result = model(inputs)
            self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
            self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

        def prepare_config_and_inputs_for_common(self):
            config_and_inputs = self.prepare_config_and_inputs()
            (
                config,
                input_ids,
                token_type_ids,
                input_mask,
                sequence_labels,
                token_labels,
                choice_labels,
            ) = config_and_inputs
            inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
            return config, inputs_dict
    def setUp(self):
        self.model_tester = TFMobileBertModelTest.TFMobileBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MobileBertConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_mobilebert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_multiple_choice(*config_and_inputs)

    def test_for_next_sequence_prediction(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_pretraining(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        # for model_name in TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
        for model_name in ["google/mobilebert-uncased"]:
            model = TFMobileBertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_tf
class TFMobileBertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = TFMobileBertForPreTraining.from_pretrained("google/mobilebert-uncased")
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]

        expected_shape = [1, 6, 30522]
        self.assertEqual(output.shape, expected_shape)

        expected_slice = tf.constant(
            [
                [
                    [-4.5919547, -9.248295, -9.645256],
                    [-6.7306175, -6.440284, -6.6052837],
                    [-7.2743506, -6.7847915, -6.024673],
                ]
            ]
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-4)
import warnings

from diffusers import StableDiffusionImg2ImgPipeline  # noqa F401


warnings.warn(
    "The `image_to_image.py` script is outdated. Please use directly `from diffusers import"
    " StableDiffusionImg2ImgPipeline` instead."
)
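# Migration sketch (illustrative, not part of the original shim): the supported import
# going forward. Loading a checkpoint additionally requires the weights to be available
# locally or on the Hub; the model id below is only an example.
#
#   from diffusers import StableDiffusionImg2ImgPipeline
#
#   pipe = StableDiffusionImg2ImgPipeline.from_pretrained("runwayml/stable-diffusion-v1-5")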
"""Rail fence (zigzag) cipher: https://en.wikipedia.org/wiki/Rail_fence_cipher"""
def encrypt(input_string: str, key: int) -> str:
    """
    Shuffles the characters of a string by placing each of them in a grid (whose height
    depends on the key) in a zigzag formation and reading it left to right.
    """
    temp_grid: list[list[str]] = [[] for _ in range(key)]
    lowest = key - 1

    if key <= 0:
        raise ValueError("Height of grid can't be 0 or negative")
    if key == 1 or len(input_string) <= key:
        return input_string

    for position, character in enumerate(input_string):
        num = position % (lowest * 2)  # puts it in bounds
        num = min(num, lowest * 2 - num)  # creates zigzag pattern
        temp_grid[num].append(character)
    grid = ["".join(row) for row in temp_grid]
    output_string = "".join(grid)

    return output_string


def decrypt(input_string: str, key: int) -> str:
    """
    Generates a template based on the key and fills it in with the characters of the
    input string, then reads it in a zigzag formation.
    """
    grid = []
    lowest = key - 1

    if key <= 0:
        raise ValueError("Height of grid can't be 0 or negative")
    if key == 1:
        return input_string

    temp_grid: list[list[str]] = [[] for _ in range(key)]  # generates template
    for position in range(len(input_string)):
        num = position % (lowest * 2)  # puts it in bounds
        num = min(num, lowest * 2 - num)  # creates zigzag pattern
        temp_grid[num].append("*")

    counter = 0
    for row in temp_grid:  # fills in the characters
        splice = input_string[counter : counter + len(row)]
        grid.append(list(splice))
        counter += len(splice)

    output_string = ""  # reads as zigzag
    for position in range(len(input_string)):
        num = position % (lowest * 2)  # puts it in bounds
        num = min(num, lowest * 2 - num)  # creates zigzag pattern
        output_string += grid[num][0]
        grid[num].pop(0)
    return output_string


def bruteforce(input_string: str) -> dict[int, str]:
    """Uses the decrypt function by guessing every key."""
    results = {}
    for key_guess in range(1, len(input_string)):  # tries every key
        results[key_guess] = decrypt(input_string, key_guess)
    return results
if __name__ == "__main__":
import doctest
doctest.testmod()
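# Illustrative round trip (not part of the original file): decryption inverts encryption
# for any key that passes the validation above.
if __name__ == "__main__":
    message = "WEAREDISCOVEREDFLEEATONCE"
    assert decrypt(encrypt(message, 3), 3) == message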
"""Implementation of the `env` subcommand of the transformers CLI."""
import importlib.util
import os
import platform
from argparse import ArgumentParser
import huggingface_hub
from .. import __version__ as version
from ..utils import (
is_accelerate_available,
is_flax_available,
is_safetensors_available,
is_tf_available,
is_torch_available,
)
from . import BaseTransformersCLICommand
def info_command_factory(_):
    return EnvironmentCommand()


def download_command_factory(args):
    return EnvironmentCommand(args.accelerate_config_file)


class EnvironmentCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        download_parser = parser.add_parser("env")
        download_parser.set_defaults(func=info_command_factory)
        download_parser.add_argument(
            "--accelerate-config_file",
            default=None,
            help="The accelerate config file to use for the default values in the launching script.",
        )
        download_parser.set_defaults(func=download_command_factory)

    def __init__(self, accelerate_config_file, *args) -> None:
        self._accelerate_config_file = accelerate_config_file
    def run(self):
        safetensors_version = "not installed"
        if is_safetensors_available():
            import safetensors

            safetensors_version = safetensors.__version__
        elif importlib.util.find_spec("safetensors") is not None:
            import safetensors

            safetensors_version = f"{safetensors.__version__} but is ignored because of PyTorch version too old."

        accelerate_version = "not installed"
        accelerate_config = accelerate_config_str = "not found"
        if is_accelerate_available():
            import accelerate
            from accelerate.commands.config import default_config_file, load_config_from_file

            accelerate_version = accelerate.__version__
            # Get the default from the config file.
            if self._accelerate_config_file is not None or os.path.isfile(default_config_file):
                accelerate_config = load_config_from_file(self._accelerate_config_file).to_dict()

            accelerate_config_str = (
                "\n".join([f"\t- {prop}: {val}" for prop, val in accelerate_config.items()])
                if isinstance(accelerate_config, dict)
                else f"\t{accelerate_config}"
            )

        pt_version = "not installed"
        pt_cuda_available = "NA"
        if is_torch_available():
            import torch

            pt_version = torch.__version__
            pt_cuda_available = torch.cuda.is_available()

        tf_version = "not installed"
        tf_cuda_available = "NA"
        if is_tf_available():
            import tensorflow as tf

            tf_version = tf.__version__
            try:
                # deprecated in v2.1
                tf_cuda_available = tf.test.is_gpu_available()
            except AttributeError:
                # returns list of devices, convert to bool
                tf_cuda_available = bool(tf.config.list_physical_devices("GPU"))

        flax_version = "not installed"
        jax_version = "not installed"
        jaxlib_version = "not installed"
        jax_backend = "NA"
        if is_flax_available():
            import flax
            import jax
            import jaxlib

            flax_version = flax.__version__
            jax_version = jax.__version__
            jaxlib_version = jaxlib.__version__
            jax_backend = jax.lib.xla_bridge.get_backend().platform

        info = {
            "`transformers` version": version,
            "Platform": platform.platform(),
            "Python version": platform.python_version(),
            "Huggingface_hub version": huggingface_hub.__version__,
            "Safetensors version": f"{safetensors_version}",
            "Accelerate version": f"{accelerate_version}",
            "Accelerate config": f"{accelerate_config_str}",
            "PyTorch version (GPU?)": f"{pt_version} ({pt_cuda_available})",
            "Tensorflow version (GPU?)": f"{tf_version} ({tf_cuda_available})",
            "Flax version (CPU?/GPU?/TPU?)": f"{flax_version} ({jax_backend})",
            "Jax version": f"{jax_version}",
            "JaxLib version": f"{jaxlib_version}",
            "Using GPU in script?": "<fill in>",
            "Using distributed or parallel set-up in script?": "<fill in>",
        }

        print("\nCopy-and-paste the text below in your GitHub issue and FILL OUT the two last points.\n")
        print(self.format_dict(info))

        return info
    @staticmethod
    def format_dict(d):
        return "\n".join([f"- {prop}: {val}" for prop, val in d.items()]) + "\n"
"""LeetCode No. 70: Climbing Stairs."""
def climb_stairs(number_of_steps: int) -> int:
    """
    Distinct ways to climb a staircase of `number_of_steps` steps,
    climbing 1 or 2 steps at a time.
    """
    assert (
        isinstance(number_of_steps, int) and number_of_steps > 0
    ), f"number_of_steps needs to be positive integer, your input {number_of_steps}"
    if number_of_steps == 1:
        return 1
    previous, current = 1, 1
    for _ in range(number_of_steps - 1):
        current, previous = current + previous, current
    return current
if __name__ == "__main__":
import doctest
doctest.testmod()
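# Illustrative check (not part of the original file): the recurrence is Fibonacci-like,
# so a 5-step staircase allows 8 distinct climbs.
if __name__ == "__main__":
    assert climb_stairs(5) == 8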
"""Tests for the Kandinsky 2.2 prior pipeline."""
import unittest
import numpy as np
import torch
from torch import nn
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModelWithProjection,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import KandinskyV22PriorPipeline, PriorTransformer, UnCLIPScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import enable_full_determinism, skip_mps
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class KandinskyV22PriorPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyV22PriorPipeline
    params = ["prompt"]
    batch_params = ["prompt", "negative_prompt"]
    required_optional_params = [
        "num_images_per_prompt",
        "generator",
        "num_inference_steps",
        "latents",
        "negative_prompt",
        "guidance_scale",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False
    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_0(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100

    @property
    def dummy_tokenizer(self):
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        return tokenizer

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=self.text_embedder_hidden_size,
            projection_dim=self.text_embedder_hidden_size,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        return CLIPTextModelWithProjection(config)

    @property
    def dummy_prior(self):
        torch.manual_seed(0)
        model_kwargs = {
            "num_attention_heads": 2,
            "attention_head_dim": 12,
            "embedding_dim": self.text_embedder_hidden_size,
            "num_layers": 1,
        }
        model = PriorTransformer(**model_kwargs)
        # clip_std and clip_mean is initialized to be 0 so PriorTransformer.post_process_latents
        # will always return 0 - set clip_std to be 1 so it won't return 0
        model.clip_std = nn.Parameter(torch.ones(model.clip_std.shape))
        return model

    @property
    def dummy_image_encoder(self):
        torch.manual_seed(0)
        config = CLIPVisionConfig(
            hidden_size=self.text_embedder_hidden_size,
            image_size=224,
            projection_dim=self.text_embedder_hidden_size,
            intermediate_size=37,
            num_attention_heads=4,
            num_channels=3,
            num_hidden_layers=5,
            patch_size=14,
        )
        return CLIPVisionModelWithProjection(config)

    @property
    def dummy_image_processor(self):
        image_processor = CLIPImageProcessor(
            crop_size=224,
            do_center_crop=True,
            do_normalize=True,
            do_resize=True,
            image_mean=[0.48145466, 0.4578275, 0.40821073],
            image_std=[0.26862954, 0.26130258, 0.27577711],
            resample=3,
            size=224,
        )
        return image_processor
    def get_dummy_components(self):
        prior = self.dummy_prior
        image_encoder = self.dummy_image_encoder
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        image_processor = self.dummy_image_processor

        scheduler = UnCLIPScheduler(
            variance_type="fixed_small_log",
            prediction_type="sample",
            num_train_timesteps=1000,
            clip_sample=True,
            clip_sample_range=10.0,
        )

        components = {
            "prior": prior,
            "image_encoder": image_encoder,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "scheduler": scheduler,
            "image_processor": image_processor,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "horse",
            "generator": generator,
            "guidance_scale": 4.0,
            "num_inference_steps": 2,
            "output_type": "np",
        }
        return inputs
    def test_kandinsky_prior(self):
        device = "cpu"

        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.image_embeds
        image_from_tuple = pipe(
            **self.get_dummy_inputs(device),
            return_dict=False,
        )[0]

        image_slice = image[0, -10:]
        image_from_tuple_slice = image_from_tuple[0, -10:]

        assert image.shape == (1, 32)

        expected_slice = np.array(
            [-0.0532, 1.7120, 0.3656, -1.0852, -0.8946, -1.1756, 0.4348, 0.2482, 0.5146, -0.1156]
        )

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2

    @skip_mps
    def test_inference_batch_single_identical(self):
        test_max_difference = torch_device == "cpu"
        relax_max_difference = True
        test_mean_pixel_difference = False

        self._test_inference_batch_single_identical(
            test_max_difference=test_max_difference,
            relax_max_difference=relax_max_difference,
            test_mean_pixel_difference=test_mean_pixel_difference,
        )

    @skip_mps
    def test_attention_slicing_forward_pass(self):
        test_max_difference = torch_device == "cpu"
        test_mean_pixel_difference = False

        self._test_attention_slicing_forward_pass(
            test_max_difference=test_max_difference,
            test_mean_pixel_difference=test_mean_pixel_difference,
        )
"""Gauss's Easter algorithm: https://en.wikipedia.org/wiki/Computus#Gauss'_Easter_algorithm"""
import math
from datetime import datetime, timedelta
def gauss_easter(year: int) -> datetime:
    """Calculation of the Gregorian Easter date for a given year."""
    metonic_cycle = year % 19
    julian_leap_year = year % 4
    non_leap_year = year % 7
    leap_day_inhibits = math.floor(year / 100)
    lunar_orbit_correction = math.floor((13 + 8 * leap_day_inhibits) / 25)
    leap_day_reinstall_number = leap_day_inhibits / 4
    secular_moon_shift = (
        15 - lunar_orbit_correction + leap_day_inhibits - leap_day_reinstall_number
    ) % 30
    century_starting_point = (4 + leap_day_inhibits - leap_day_reinstall_number) % 7

    # days to be added to March 21
    days_to_add = (19 * metonic_cycle + secular_moon_shift) % 30

    # PHM -> Paschal Full Moon
    days_from_phm_to_sunday = (
        2 * julian_leap_year
        + 4 * non_leap_year
        + 6 * days_to_add
        + century_starting_point
    ) % 7

    if days_to_add == 29 and days_from_phm_to_sunday == 6:
        return datetime(year, 4, 19)
    elif days_to_add == 28 and days_from_phm_to_sunday == 6:
        return datetime(year, 4, 18)
    else:
        return datetime(year, 3, 22) + timedelta(
            days=int(days_to_add + days_from_phm_to_sunday)
        )
if __name__ == "__main__":
for year in (1994, 2000, 2010, 2021, 2023):
__lowerCamelCase = '''will be''' if year > datetime.now().year else '''was'''
print(f"""Easter in {year} {tense} {gauss_easter(year)}""")
"""Tests for the diffusers IPNDM scheduler."""
import tempfile
import torch
from diffusers import IPNDMScheduler
from .test_schedulers import SchedulerCommonTest
class IPNDMSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (IPNDMScheduler,)
    forward_default_kwargs = (("num_inference_steps", 50),)

    def get_scheduler_config(self, **kwargs):
        config = {"num_train_timesteps": 1000}
        config.update(**kwargs)
        return config

    def check_over_configs(self, time_step=0, **config):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)
            # copy over dummy past residuals
            scheduler.ets = dummy_past_residuals[:]

            if time_step is None:
                time_step = scheduler.timesteps[len(scheduler.timesteps) // 2]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residuals
                new_scheduler.ets = dummy_past_residuals[:]

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

    def test_from_save_pretrained(self):
        pass

    def check_over_forward(self, time_step=0, **forward_kwargs):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)

            # copy over dummy past residuals (must be after setting timesteps)
            scheduler.ets = dummy_past_residuals[:]

            if time_step is None:
                time_step = scheduler.timesteps[len(scheduler.timesteps) // 2]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                # copy over dummy past residuals
                new_scheduler.set_timesteps(num_inference_steps)

                # copy over dummy past residual (must be after setting timesteps)
                new_scheduler.ets = dummy_past_residuals[:]

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
def snake_case_ ( self , **UpperCamelCase__ ) -> Tuple:
'''simple docstring'''
A_ = self.scheduler_classes[0]
A_ = self.get_scheduler_config(**UpperCamelCase__ )
A_ = scheduler_class(**UpperCamelCase__ )
A_ = 10
A_ = self.dummy_model()
A_ = self.dummy_sample_deter
scheduler.set_timesteps(UpperCamelCase__ )
for i, t in enumerate(scheduler.timesteps ):
A_ = model(UpperCamelCase__ , UpperCamelCase__ )
A_ = scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ).prev_sample
for i, t in enumerate(scheduler.timesteps ):
A_ = model(UpperCamelCase__ , UpperCamelCase__ )
A_ = scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ).prev_sample
return sample
def snake_case_ ( self ) -> Union[str, Any]:
'''simple docstring'''
A_ = dict(self.forward_default_kwargs )
A_ = kwargs.pop("""num_inference_steps""" , UpperCamelCase__ )
for scheduler_class in self.scheduler_classes:
A_ = self.get_scheduler_config()
A_ = scheduler_class(**UpperCamelCase__ )
A_ = self.dummy_sample
A_ = 0.1 * sample
if num_inference_steps is not None and hasattr(UpperCamelCase__ , """set_timesteps""" ):
scheduler.set_timesteps(UpperCamelCase__ )
elif num_inference_steps is not None and not hasattr(UpperCamelCase__ , """set_timesteps""" ):
A_ = num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
A_ = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
A_ = dummy_past_residuals[:]
A_ = scheduler.timesteps[5]
A_ = scheduler.timesteps[6]
A_ = scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__ ).prev_sample
A_ = scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__ ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
A_ = scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__ ).prev_sample
A_ = scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__ ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
def snake_case_ ( self ) -> Any:
'''simple docstring'''
for timesteps in [100, 1000]:
self.check_over_configs(num_train_timesteps=UpperCamelCase__ , time_step=UpperCamelCase__ )
def snake_case_ ( self ) -> Any:
'''simple docstring'''
for t, num_inference_steps in zip([1, 5, 10] , [10, 50, 100] ):
self.check_over_forward(num_inference_steps=UpperCamelCase__ , time_step=UpperCamelCase__ )
def snake_case_ ( self ) -> Dict:
'''simple docstring'''
A_ = self.full_loop()
A_ = torch.mean(torch.abs(UpperCamelCase__ ) )
assert abs(result_mean.item() - 2540529 ) < 10
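# Standalone round-trip sketch of what `check_over_configs` exercises
# (public diffusers API; the values are illustrative):
#
#     import tempfile
#     from diffusers import IPNDMScheduler
#
#     scheduler = IPNDMScheduler(num_train_timesteps=1000)
#     scheduler.set_timesteps(50)
#     with tempfile.TemporaryDirectory() as tmpdirname:
#         scheduler.save_config(tmpdirname)
#         reloaded = IPNDMScheduler.from_pretrained(tmpdirname)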
| 667 | 1 |
'''simple docstring'''
import os
def solution() -> int:
    with open(os.path.dirname(__file__) + "/grid.txt") as f:
        l = []  # noqa: E741
        for _ in range(20):
            l.append([int(x) for x in f.readline().split()])

    maximum = 0

    # right
    for i in range(20):
        for j in range(17):
            temp = l[i][j] * l[i][j + 1] * l[i][j + 2] * l[i][j + 3]
            if temp > maximum:
                maximum = temp

    # down
    for i in range(17):
        for j in range(20):
            temp = l[i][j] * l[i + 1][j] * l[i + 2][j] * l[i + 3][j]
            if temp > maximum:
                maximum = temp

    # diagonal 1
    for i in range(17):
        for j in range(17):
            temp = l[i][j] * l[i + 1][j + 1] * l[i + 2][j + 2] * l[i + 3][j + 3]
            if temp > maximum:
                maximum = temp

    # diagonal 2
    for i in range(17):
        for j in range(3, 20):
            temp = l[i][j] * l[i + 1][j - 1] * l[i + 2][j - 2] * l[i + 3][j - 3]
            if temp > maximum:
                maximum = temp
    return maximum


if __name__ == "__main__":
    print(solution())
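# The four scans above can also be expressed with direction vectors; a minimal,
# equivalent sketch (illustrative helper, not part of the original file; assumes
# the same 20x20 grid of ints):
#
# def largest_product(grid: list[list[int]]) -> int:
#     best = 0
#     for di, dj in ((0, 1), (1, 0), (1, 1), (1, -1)):
#         for i in range(20):
#             for j in range(20):
#                 if 0 <= i + 3 * di < 20 and 0 <= j + 3 * dj < 20:
#                     p = 1
#                     for k in range(4):
#                         p *= grid[i + k * di][j + k * dj]
#                     best = max(best, p)
#     return best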
| 667 |
'''simple docstring'''
import argparse
import re
from flax.traverse_util import flatten_dict, unflatten_dict
from t5x import checkpoints
from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration
from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model
from transformers.utils import logging
logging.set_verbosity_info()
# should not include what is already done by the `from_pt` argument
__lowerCamelCase = {
'''/attention/''': '''/0/SelfAttention/''',
'''/self_attention/''': '''/0/SelfAttention/''',
'''/encoder_decoder_attention/''': '''/1/EncDecAttention/''',
'''value''': '''v''',
'''query''': '''q''',
'''key''': '''k''',
'''out''': '''o''',
'''pre_self_attention_layer_norm''': '''0/layer_norm''',
'''pre_cross_attention_layer_norm''': '''1/layer_norm''',
'''pre_attention_layer_norm''': '''0/layer_norm''', # previously 1, but seems wrong
'''token_embedder''': '''shared''',
'''encoder_norm''': '''final_layer_norm''',
'''decoder_norm''': '''final_layer_norm''',
'''relpos_bias/rel_embedding''': '''block/0/layer/0/SelfAttention/relative_attention_bias/weight''',
'''router/router_weights/w/''': '''router/classifier/''',
'''roer/roer_weights/w/''': '''router/classifier/''',
'''logits_dense''': '''lm_head''',
}
def rename_keys(s_dict):
    # 1. in HF T5, we have block.{x}.layer.{y}. which corresponds to layer.{x} in
    # the original model
    keys = list(s_dict.keys())
    for key in keys:
        layer_to_block_of_layer = r".*/layers_(\d+)"
        new_key = key
        if re.match(layer_to_block_of_layer, key):
            new_key = re.sub(r"layers_(\d+)", r"block/\1/layer", new_key)

        layer_to_block_of_layer = r"(encoder|decoder)\/"

        if re.match(layer_to_block_of_layer, key):
            groups = re.match(layer_to_block_of_layer, new_key).groups()
            if groups[0] == "encoder":
                new_key = re.sub(r"/mlp/", r"/1/mlp/", new_key)
                new_key = re.sub(r"/pre_mlp_layer_norm/", r"/1/layer_norm/", new_key)
            elif groups[0] == "decoder":
                new_key = re.sub(r"/mlp/", r"/2/mlp/", new_key)
                new_key = re.sub(r"/pre_mlp_layer_norm/", r"/2/layer_norm/", new_key)

        # 2. Convert other classic mappings
        for old_key, temp_key in MOE_LAYER_NAME_MAPPING.items():
            if old_key in new_key:
                new_key = new_key.replace(old_key, temp_key)

        print(f"{key} -> {new_key}")
        s_dict[new_key] = s_dict.pop(key)

    if "encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict:
        s_dict["encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"] = s_dict[
            "encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"
        ].T
    if "decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict:
        s_dict["decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"] = s_dict[
            "decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"
        ].T

    # 3. Take extra care of the EXPERTS layer
    for key in list(s_dict.keys()):
        if "expert" in key:
            num_experts = s_dict[key].shape[0]
            expert_weights = s_dict[key]
            for idx in range(num_experts):
                # the per-expert target path follows the HF SwitchTransformers layout
                s_dict[key.replace("expert/", f"experts/expert_{idx}/")] = expert_weights[idx]
                print(f"{key} -> {key.replace('expert/', f'experts/expert_{idx}/')}")
            s_dict.pop(key)

    return s_dict
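# Worked example of the renaming above on a toy key (hypothetical checkpoint
# entry, not taken from a real model):
#
#     "encoder/layers_0/attention/query/kernel"
#     -> "encoder/block/0/layer/0/SelfAttention/q/kernel"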
__lowerCamelCase = {
'''NUM_ENCODER_LAYERS''': '''num_layers''',
'''NUM_DECODER_LAYERS''': '''num_decoder_layers''',
'''NUM_HEADS''': '''num_heads''',
'''HEAD_DIM''': '''d_kv''',
'''EMBED_DIM''': '''d_model''',
'''MLP_DIM''': '''d_ff''',
'''NUM_SELECTED_EXPERTS''': '''num_selected_experts''',
'''NUM_ENCODER_SPARSE_LAYERS''': '''num_sparse_encoder_layers''',
'''NUM_DECODER_SPARSE_LAYERS''': '''num_sparse_decoder_layers''',
'''dense.MlpBlock.activations''': '''feed_forward_proj''',
}
def convert_gin_to_config(gin_file, num_experts):
    # Convert a google style config to the hugging face format
    import regex as re

    with open(gin_file, "r") as f:
        raw_gin = f.read()

    regex_match = re.findall(r"(.*) = ([0-9.]*)", raw_gin)
    args = {}
    for param, value in regex_match:
        if param in GIN_TO_CONFIG_MAPPING and value != "":
            args[GIN_TO_CONFIG_MAPPING[param]] = float(value) if "." in value else int(value)

    activation = re.findall(r"(.*activations) = \(\'(.*)\',\)", raw_gin)[0]
    args[GIN_TO_CONFIG_MAPPING[activation[0]]] = str(activation[1])

    args["num_experts"] = num_experts
    config = SwitchTransformersConfig(**args)
    return config
def convert_flax_checkpoint_to_pytorch(
    flax_checkpoint_path, config_file, gin_file=None, pytorch_dump_path="./", num_experts=8
):
    # Initialise PyTorch model
    print(f"Loading flax weights from : {flax_checkpoint_path}")
    flax_params = checkpoints.load_t5x_checkpoint(flax_checkpoint_path)

    if gin_file is not None:
        config = convert_gin_to_config(gin_file, num_experts)
    else:
        config = SwitchTransformersConfig.from_pretrained(config_file)

    pt_model = SwitchTransformersForConditionalGeneration(config)

    flax_params = flax_params["target"]
    flax_params = flatten_dict(flax_params, sep="/")
    flax_params = rename_keys(flax_params)
    flax_params = unflatten_dict(flax_params, sep="/")

    # Load the flax params in the PT model
    load_flax_weights_in_pytorch_model(pt_model, flax_params)

    print(f"Save PyTorch model to {pytorch_dump_path}")
    pt_model.save_pretrained(pytorch_dump_path)
if __name__ == "__main__":
__lowerCamelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--switch_t5x_checkpoint_path''',
default=None,
type=str,
required=True,
help=(
'''The config json file corresponding to the pre-trained SwitchTransformers model. \nThis specifies the'''
''' model architecture. If not provided, a `gin_file` has to be provided.'''
),
)
parser.add_argument(
'''--gin_file''',
default=None,
type=str,
required=False,
help='''Path to the gin config file. If not provided, a `config_file` has to be passed ''',
)
parser.add_argument(
'''--config_name''', default=None, type=str, required=False, help='''Config name of SwitchTransformers model.'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output pytorch model.'''
)
parser.add_argument('''--num_experts''', default=8, type=int, required=False, help='''Number of experts''')
__lowerCamelCase = parser.parse_args()
convert_flax_checkpoint_to_pytorch(
args.switch_t5x_checkpoint_path,
args.config_name,
args.gin_file,
args.pytorch_dump_folder_path,
args.num_experts,
)
| 667 | 1 |
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class A__ ( metaclass=_snake_case ):
lowercase = ["keras_nlp"]
def __init__( self , *UpperCamelCase__ , **UpperCamelCase__ ) -> Optional[Any]:
'''simple docstring'''
requires_backends(self , ["""keras_nlp"""] )
| 667 |
'''simple docstring'''
def climb_stairs(number_of_steps: int) -> int:
    assert (
        isinstance(number_of_steps, int) and number_of_steps > 0
    ), f"number_of_steps needs to be positive integer, your input {number_of_steps}"
    if number_of_steps == 1:
        return 1
    previous, current = 1, 1
    for _ in range(number_of_steps - 1):
        current, previous = current + previous, current
    return current


if __name__ == "__main__":
    import doctest

    doctest.testmod()
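# With steps of size 1 or 2 the counts follow the Fibonacci sequence:
# climb_stairs(1) == 1, climb_stairs(3) == 3, climb_stairs(5) == 8.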
| 667 | 1 |
'''simple docstring'''
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
if is_torch_available():
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
@require_torch
@require_sentencepiece
@require_tokenizers
class A__ ( unittest.TestCase ):
@slow
    def test_small_integration_test(self):
        model = AutoModelForSeq2SeqLM.from_pretrained("google/mt5-small", return_dict=True).to(torch_device)
        tokenizer = AutoTokenizer.from_pretrained("google/mt5-small")

        input_ids = tokenizer("Hello there", return_tensors="pt").input_ids
        labels = tokenizer("Hi I am", return_tensors="pt").input_ids

        loss = model(input_ids.to(torch_device), labels=labels.to(torch_device)).loss
        # `loss` is the mean cross-entropy per target token; scaling by the target
        # length recovers the sequence score reported by the original Mesh TF code.
        mtf_score = -(labels.shape[-1] * loss.item())

        EXPECTED_SCORE = -84.9127
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 1e-4)
| 667 |
'''simple docstring'''
def is_palindrome(n: int) -> bool:
    return str(n) == str(n)[::-1]


def sum_reverse(n: int) -> int:
    return int(n) + int(str(n)[::-1])


def solution(limit: int = 10000) -> int:
    lychrel_nums = []
    for num in range(1, limit):
        iterations = 0
        a = num
        while iterations < 50:
            num = sum_reverse(num)
            iterations += 1
            if is_palindrome(num):
                break
        else:
            lychrel_nums.append(a)
    return len(lychrel_nums)


if __name__ == "__main__":
    print(f"{solution() = }")
| 667 | 1 |
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import is_flaky, require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DonutImageProcessor
class DonutImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_thumbnail=True,
        do_align_axis=False,
        do_pad=True,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size if size is not None else {"height": 18, "width": 20}
        self.do_thumbnail = do_thumbnail
        self.do_align_axis = do_align_axis
        self.do_pad = do_pad
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_thumbnail": self.do_thumbnail,
            "do_align_long_axis": self.do_align_axis,
            "do_pad": self.do_pad,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
        }


@require_torch
@require_vision
class DonutImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = DonutImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = DonutImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_thumbnail"))
        self.assertTrue(hasattr(image_processing, "do_align_long_axis"))
        self.assertTrue(hasattr(image_processing, "do_pad"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 18, "width": 20})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42)
        self.assertEqual(image_processor.size, {"height": 42, "width": 42})

        # Previous config had dimensions in (width, height) order
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=(42, 84))
        self.assertEqual(image_processor.size, {"height": 84, "width": 42})

    def test_batch_feature(self):
        pass

    @is_flaky()
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

    @is_flaky()
    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

    @is_flaky()
    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
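# Minimal usage sketch (illustrative values; mirrors the tests above):
#
#     import numpy as np
#     from PIL import Image
#     from transformers import DonutImageProcessor
#
#     processor = DonutImageProcessor(size={"height": 18, "width": 20})
#     image = Image.fromarray(np.zeros((30, 30, 3), dtype=np.uint8))
#     pixel_values = processor(image, return_tensors="pt").pixel_values
#     # with these settings the output is resized/padded to (1, 3, 18, 20)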
| 667 |
'''simple docstring'''
import argparse
import json
from typing import List
from ltp import LTP
from transformers.models.bert.tokenization_bert import BertTokenizer
def _is_chinese_char(cp):
    # This defines a "chinese character" as anything in the CJK Unicode block:
    #   https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
    #
    # Note that the CJK Unicode block is NOT all Japanese and Korean characters,
    # despite its name. The modern Korean Hangul alphabet is a different block,
    # as is Japanese Hiragana and Katakana. Those alphabets are used to write
    # space-separated words, so they are not treated specially and are handled
    # like all of the other languages.
    if (
        (cp >= 0x4E00 and cp <= 0x9FFF)
        or (cp >= 0x3400 and cp <= 0x4DBF)  #
        or (cp >= 0x20000 and cp <= 0x2A6DF)  #
        or (cp >= 0x2A700 and cp <= 0x2B73F)  #
        or (cp >= 0x2B740 and cp <= 0x2B81F)  #
        or (cp >= 0x2B820 and cp <= 0x2CEAF)  #
        or (cp >= 0xF900 and cp <= 0xFAFF)
        or (cp >= 0x2F800 and cp <= 0x2FA1F)  #
    ):  #
        return True

    return False
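# e.g. ord("中") == 0x4E2D falls in the first range -> True,
# while ord("A") == 0x41 does not -> False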
def is_chinese(word: str):
    # word like '180' or '身高' or '神'
    for char in word:
        char = ord(char)
        if not _is_chinese_char(char):
            return 0
    return 1
def get_chinese_word(tokens: list):
    word_set = set()

    for token in tokens:
        chinese_word = len(token) > 1 and is_chinese(token)
        if chinese_word:
            word_set.add(token)
    word_list = list(word_set)
    return word_list
def add_sub_symbol(bert_tokens: list, chinese_word_set: set):
    if not chinese_word_set:
        return bert_tokens
    max_word_len = max([len(w) for w in chinese_word_set])

    bert_word = bert_tokens
    start, end = 0, len(bert_word)
    while start < end:
        single_word = True
        if is_chinese(bert_word[start]):
            l = min(end - start, max_word_len)  # noqa: E741
            for i in range(l, 1, -1):
                whole_word = "".join(bert_word[start : start + i])
                if whole_word in chinese_word_set:
                    for j in range(start + 1, start + i):
                        bert_word[j] = "##" + bert_word[j]
                    start = start + i
                    single_word = False
                    break
        if single_word:
            start += 1
    return bert_word
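# Worked example (hypothetical inputs): with bert tokens
# ["我", "喜", "欢", "你"] and LTP word set {"喜欢"}, only the trailing
# character of the matched word is re-marked as a subword:
# ["我", "喜", "##欢", "你"].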
def prepare_ref(lines: list, ltp_tokenizer: LTP, bert_tokenizer: BertTokenizer):
    ltp_res = []
    for i in range(0, len(lines), 100):
        res = ltp_tokenizer.pipeline(lines[i : i + 100], tasks=["cws"]).cws
        res = [get_chinese_word(r) for r in res]
        ltp_res.extend(res)
    assert len(ltp_res) == len(lines)

    bert_res = []
    for i in range(0, len(lines), 100):
        res = bert_tokenizer(lines[i : i + 100], add_special_tokens=True, truncation=True, max_length=512)
        bert_res.extend(res["input_ids"])
    assert len(bert_res) == len(lines)

    ref_ids = []
    for input_ids, chinese_word in zip(bert_res, ltp_res):
        input_tokens = []
        for id in input_ids:
            token = bert_tokenizer._convert_id_to_token(id)
            input_tokens.append(token)
        input_tokens = add_sub_symbol(input_tokens, chinese_word)
        ref_id = []
        # We only save pos of chinese subwords starting with ##, which means they are part of a whole word.
        for i, token in enumerate(input_tokens):
            if token[:2] == "##":
                clean_token = token[2:]
                # save chinese tokens' pos
                if len(clean_token) == 1 and _is_chinese_char(ord(clean_token)):
                    ref_id.append(i)
        ref_ids.append(ref_id)

    assert len(ref_ids) == len(bert_res)
    return ref_ids
def main(args):
    # For Chinese (Ro)Bert, the best result is from: RoBERTa-wwm-ext (https://github.com/ymcui/Chinese-BERT-wwm)
    # If we want to fine-tune these models, we have to use the same tokenizer: LTP (https://github.com/HIT-SCIR/ltp)
    with open(args.file_name, "r", encoding="utf-8") as f:
        data = f.readlines()
    data = [line.strip() for line in data if len(line) > 0 and not line.isspace()]  # avoid delimiter like '\u2029'
    ltp_tokenizer = LTP(args.ltp)  # faster in GPU device
    bert_tokenizer = BertTokenizer.from_pretrained(args.bert)
    ref_ids = prepare_ref(data, ltp_tokenizer, bert_tokenizer)
    with open(args.save_path, "w", encoding="utf-8") as f:
        data = [json.dumps(ref) + "\n" for ref in ref_ids]
        f.writelines(data)
if __name__ == "__main__":
__lowerCamelCase = argparse.ArgumentParser(description='''prepare_chinese_ref''')
parser.add_argument(
'''--file_name''',
required=False,
type=str,
default='''./resources/chinese-demo.txt''',
help='''file need process, same as training data in lm''',
)
parser.add_argument(
'''--ltp''',
required=False,
type=str,
default='''./resources/ltp''',
help='''resources for LTP tokenizer, usually a path''',
)
parser.add_argument(
'''--bert''',
required=False,
type=str,
default='''./resources/robert''',
help='''resources for Bert tokenizer''',
)
parser.add_argument(
'''--save_path''',
required=False,
type=str,
default='''./resources/ref.txt''',
help='''path to save res''',
)
__lowerCamelCase = parser.parse_args()
main(args)
| 667 | 1 |
'''simple docstring'''
from math import isqrt
def calculate_prime_numbers(max_number: int) -> list[int]:
    is_prime = [True] * max_number
    for i in range(2, isqrt(max_number - 1) + 1):
        if is_prime[i]:
            for j in range(i**2, max_number, i):
                is_prime[j] = False

    return [i for i in range(2, max_number) if is_prime[i]]


def solution(max_number: int = 10**8) -> int:
    prime_numbers = calculate_prime_numbers(max_number // 2)

    semiprimes_count = 0
    left = 0
    right = len(prime_numbers) - 1
    while left <= right:
        while prime_numbers[left] * prime_numbers[right] >= max_number:
            right -= 1
        semiprimes_count += right - left + 1
        left += 1

    return semiprimes_count


if __name__ == "__main__":
    print(f"{solution() = }")
| 667 |
'''simple docstring'''
from __future__ import annotations
import math
def is_prime(number: int) -> bool:
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes are of the form 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


odd_composites = [num for num in range(3, 10_0001, 2) if not is_prime(num)]


def compute_nums(n: int) -> list[int]:
    if not isinstance(n, int):
        raise ValueError("n must be an integer")
    if n <= 0:
        raise ValueError("n must be >= 0")

    list_nums = []
    for num in range(len(odd_composites)):
        i = 0
        while 2 * i * i <= odd_composites[num]:
            rem = odd_composites[num] - 2 * i * i
            if is_prime(rem):
                break
            i += 1
        else:
            list_nums.append(odd_composites[num])
            if len(list_nums) == n:
                return list_nums

    return []


def solution() -> int:
    return compute_nums(1)[0]


if __name__ == "__main__":
    print(f"{solution() = }")
| 667 | 1 |
'''simple docstring'''
from typing import Any, Callable, Dict, List, Optional, Union
import torch
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
__lowerCamelCase = '''CompVis/stable-diffusion-v1-1'''
__lowerCamelCase = '''CompVis/stable-diffusion-v1-2'''
__lowerCamelCase = '''CompVis/stable-diffusion-v1-3'''
__lowerCamelCase = '''CompVis/stable-diffusion-v1-4'''
class A__ ( _snake_case ):
def __init__( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = True , ) -> Optional[Any]:
'''simple docstring'''
super().__init__()
A_ = StableDiffusionPipeline.from_pretrained(UpperCamelCase__ )
A_ = StableDiffusionPipeline.from_pretrained(UpperCamelCase__ )
A_ = StableDiffusionPipeline.from_pretrained(UpperCamelCase__ )
A_ = StableDiffusionPipeline(
vae=UpperCamelCase__ , text_encoder=UpperCamelCase__ , tokenizer=UpperCamelCase__ , unet=UpperCamelCase__ , scheduler=UpperCamelCase__ , safety_checker=UpperCamelCase__ , feature_extractor=UpperCamelCase__ , requires_safety_checker=UpperCamelCase__ , )
self.register_modules(pipelinea=self.pipea , pipelinea=self.pipea , pipelinea=self.pipea , pipelinea=self.pipea )
@property
def snake_case_ ( self ) -> Dict[str, Any]:
'''simple docstring'''
return {k: getattr(self , UpperCamelCase__ ) for k in self.config.keys() if not k.startswith("""_""" )}
def snake_case_ ( self , UpperCamelCase__ = "auto" ) -> Any:
'''simple docstring'''
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
A_ = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(UpperCamelCase__ )
def snake_case_ ( self ) -> int:
'''simple docstring'''
self.enable_attention_slicing(UpperCamelCase__ )
@torch.no_grad()
def snake_case_ ( self , UpperCamelCase__ , UpperCamelCase__ = 512 , UpperCamelCase__ = 512 , UpperCamelCase__ = 50 , UpperCamelCase__ = 7.5 , UpperCamelCase__ = None , UpperCamelCase__ = 1 , UpperCamelCase__ = 0.0 , UpperCamelCase__ = None , UpperCamelCase__ = None , UpperCamelCase__ = "pil" , UpperCamelCase__ = True , UpperCamelCase__ = None , UpperCamelCase__ = 1 , **UpperCamelCase__ , ) -> List[str]:
'''simple docstring'''
return self.pipea(
prompt=UpperCamelCase__ , height=UpperCamelCase__ , width=UpperCamelCase__ , num_inference_steps=UpperCamelCase__ , guidance_scale=UpperCamelCase__ , negative_prompt=UpperCamelCase__ , num_images_per_prompt=UpperCamelCase__ , eta=UpperCamelCase__ , generator=UpperCamelCase__ , latents=UpperCamelCase__ , output_type=UpperCamelCase__ , return_dict=UpperCamelCase__ , callback=UpperCamelCase__ , callback_steps=UpperCamelCase__ , **UpperCamelCase__ , )
@torch.no_grad()
def snake_case_ ( self , UpperCamelCase__ , UpperCamelCase__ = 512 , UpperCamelCase__ = 512 , UpperCamelCase__ = 50 , UpperCamelCase__ = 7.5 , UpperCamelCase__ = None , UpperCamelCase__ = 1 , UpperCamelCase__ = 0.0 , UpperCamelCase__ = None , UpperCamelCase__ = None , UpperCamelCase__ = "pil" , UpperCamelCase__ = True , UpperCamelCase__ = None , UpperCamelCase__ = 1 , **UpperCamelCase__ , ) -> int:
'''simple docstring'''
return self.pipea(
prompt=UpperCamelCase__ , height=UpperCamelCase__ , width=UpperCamelCase__ , num_inference_steps=UpperCamelCase__ , guidance_scale=UpperCamelCase__ , negative_prompt=UpperCamelCase__ , num_images_per_prompt=UpperCamelCase__ , eta=UpperCamelCase__ , generator=UpperCamelCase__ , latents=UpperCamelCase__ , output_type=UpperCamelCase__ , return_dict=UpperCamelCase__ , callback=UpperCamelCase__ , callback_steps=UpperCamelCase__ , **UpperCamelCase__ , )
@torch.no_grad()
def snake_case_ ( self , UpperCamelCase__ , UpperCamelCase__ = 512 , UpperCamelCase__ = 512 , UpperCamelCase__ = 50 , UpperCamelCase__ = 7.5 , UpperCamelCase__ = None , UpperCamelCase__ = 1 , UpperCamelCase__ = 0.0 , UpperCamelCase__ = None , UpperCamelCase__ = None , UpperCamelCase__ = "pil" , UpperCamelCase__ = True , UpperCamelCase__ = None , UpperCamelCase__ = 1 , **UpperCamelCase__ , ) -> str:
'''simple docstring'''
return self.pipea(
prompt=UpperCamelCase__ , height=UpperCamelCase__ , width=UpperCamelCase__ , num_inference_steps=UpperCamelCase__ , guidance_scale=UpperCamelCase__ , negative_prompt=UpperCamelCase__ , num_images_per_prompt=UpperCamelCase__ , eta=UpperCamelCase__ , generator=UpperCamelCase__ , latents=UpperCamelCase__ , output_type=UpperCamelCase__ , return_dict=UpperCamelCase__ , callback=UpperCamelCase__ , callback_steps=UpperCamelCase__ , **UpperCamelCase__ , )
@torch.no_grad()
def snake_case_ ( self , UpperCamelCase__ , UpperCamelCase__ = 512 , UpperCamelCase__ = 512 , UpperCamelCase__ = 50 , UpperCamelCase__ = 7.5 , UpperCamelCase__ = None , UpperCamelCase__ = 1 , UpperCamelCase__ = 0.0 , UpperCamelCase__ = None , UpperCamelCase__ = None , UpperCamelCase__ = "pil" , UpperCamelCase__ = True , UpperCamelCase__ = None , UpperCamelCase__ = 1 , **UpperCamelCase__ , ) -> int:
'''simple docstring'''
return self.pipea(
prompt=UpperCamelCase__ , height=UpperCamelCase__ , width=UpperCamelCase__ , num_inference_steps=UpperCamelCase__ , guidance_scale=UpperCamelCase__ , negative_prompt=UpperCamelCase__ , num_images_per_prompt=UpperCamelCase__ , eta=UpperCamelCase__ , generator=UpperCamelCase__ , latents=UpperCamelCase__ , output_type=UpperCamelCase__ , return_dict=UpperCamelCase__ , callback=UpperCamelCase__ , callback_steps=UpperCamelCase__ , **UpperCamelCase__ , )
@torch.no_grad()
def snake_case_ ( self , UpperCamelCase__ , UpperCamelCase__ = 512 , UpperCamelCase__ = 512 , UpperCamelCase__ = 50 , UpperCamelCase__ = 7.5 , UpperCamelCase__ = None , UpperCamelCase__ = 1 , UpperCamelCase__ = 0.0 , UpperCamelCase__ = None , UpperCamelCase__ = None , UpperCamelCase__ = "pil" , UpperCamelCase__ = True , UpperCamelCase__ = None , UpperCamelCase__ = 1 , **UpperCamelCase__ , ) -> int:
'''simple docstring'''
A_ = """cuda""" if torch.cuda.is_available() else """cpu"""
self.to(UpperCamelCase__ )
# Checks if the height and width are divisible by 8 or not
if height % 8 != 0 or width % 8 != 0:
raise ValueError(f'''`height` and `width` must be divisible by 8 but are {height} and {width}.''' )
# Get first result from Stable Diffusion Checkpoint v1.1
A_ = self.textaimg_sda_a(
prompt=UpperCamelCase__ , height=UpperCamelCase__ , width=UpperCamelCase__ , num_inference_steps=UpperCamelCase__ , guidance_scale=UpperCamelCase__ , negative_prompt=UpperCamelCase__ , num_images_per_prompt=UpperCamelCase__ , eta=UpperCamelCase__ , generator=UpperCamelCase__ , latents=UpperCamelCase__ , output_type=UpperCamelCase__ , return_dict=UpperCamelCase__ , callback=UpperCamelCase__ , callback_steps=UpperCamelCase__ , **UpperCamelCase__ , )
# Get first result from Stable Diffusion Checkpoint v1.2
A_ = self.textaimg_sda_a(
prompt=UpperCamelCase__ , height=UpperCamelCase__ , width=UpperCamelCase__ , num_inference_steps=UpperCamelCase__ , guidance_scale=UpperCamelCase__ , negative_prompt=UpperCamelCase__ , num_images_per_prompt=UpperCamelCase__ , eta=UpperCamelCase__ , generator=UpperCamelCase__ , latents=UpperCamelCase__ , output_type=UpperCamelCase__ , return_dict=UpperCamelCase__ , callback=UpperCamelCase__ , callback_steps=UpperCamelCase__ , **UpperCamelCase__ , )
# Get first result from Stable Diffusion Checkpoint v1.3
A_ = self.textaimg_sda_a(
prompt=UpperCamelCase__ , height=UpperCamelCase__ , width=UpperCamelCase__ , num_inference_steps=UpperCamelCase__ , guidance_scale=UpperCamelCase__ , negative_prompt=UpperCamelCase__ , num_images_per_prompt=UpperCamelCase__ , eta=UpperCamelCase__ , generator=UpperCamelCase__ , latents=UpperCamelCase__ , output_type=UpperCamelCase__ , return_dict=UpperCamelCase__ , callback=UpperCamelCase__ , callback_steps=UpperCamelCase__ , **UpperCamelCase__ , )
# Get first result from Stable Diffusion Checkpoint v1.4
A_ = self.textaimg_sda_a(
prompt=UpperCamelCase__ , height=UpperCamelCase__ , width=UpperCamelCase__ , num_inference_steps=UpperCamelCase__ , guidance_scale=UpperCamelCase__ , negative_prompt=UpperCamelCase__ , num_images_per_prompt=UpperCamelCase__ , eta=UpperCamelCase__ , generator=UpperCamelCase__ , latents=UpperCamelCase__ , output_type=UpperCamelCase__ , return_dict=UpperCamelCase__ , callback=UpperCamelCase__ , callback_steps=UpperCamelCase__ , **UpperCamelCase__ , )
# Get all result images into a single list and pass it via StableDiffusionPipelineOutput for final result
return StableDiffusionPipelineOutput([resa[0], resa[0], resa[0], resa[0]] )
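# Hypothetical usage sketch for the comparison pipeline above (this file
# appears to correspond to the `stable_diffusion_comparison` community
# pipeline; downloading all four v1.x checkpoints needs substantial disk
# space and memory):
#
#     from diffusers import DiffusionPipeline
#
#     pipe = DiffusionPipeline.from_pretrained(
#         "CompVis/stable-diffusion-v1-4",
#         custom_pipeline="stable_diffusion_comparison",
#     )
#     images = pipe(prompt="an astronaut riding a horse").images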
| 667 |
'''simple docstring'''
def search(list_data: list, key: int, left: int = 0, right: int = 0) -> int:
    right = right or len(list_data) - 1
    if left > right:
        return -1
    elif list_data[left] == key:
        return left
    elif list_data[right] == key:
        return right
    else:
        return search(list_data, key, left + 1, right - 1)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
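# Example: search([1, 2, 4, 8, 16], 8) checks both ends and recurses inward,
# returning index 3; a key that is absent returns -1.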
| 667 | 1 |
'''simple docstring'''
import importlib.util
import os
import platform
from argparse import ArgumentParser
import huggingface_hub
from .. import __version__ as version
from ..utils import (
is_accelerate_available,
is_flax_available,
is_safetensors_available,
is_tf_available,
is_torch_available,
)
from . import BaseTransformersCLICommand
def info_command_factory(_):
    return EnvironmentCommand()


def download_command_factory(args):
    return EnvironmentCommand(args.accelerate_config_file)


class EnvironmentCommand(BaseTransformersCLICommand):
@staticmethod
    def register_subcommand(parser: ArgumentParser):
        download_parser = parser.add_parser("env")
        download_parser.set_defaults(func=info_command_factory)
        download_parser.add_argument(
            "--accelerate-config_file",
            default=None,
            help="The accelerate config file to use for the default values in the launching script.",
        )
        download_parser.set_defaults(func=download_command_factory)
    def __init__(self, accelerate_config_file, *args) -> None:
        self._accelerate_config_file = accelerate_config_file
    def run(self):
        safetensors_version = "not installed"
        if is_safetensors_available():
            import safetensors

            safetensors_version = safetensors.__version__
        elif importlib.util.find_spec("safetensors") is not None:
            import safetensors

            safetensors_version = f"{safetensors.__version__} but is ignored because of PyTorch version too old."

        accelerate_version = "not installed"
        accelerate_config = accelerate_config_str = "not found"
        if is_accelerate_available():
            import accelerate
            from accelerate.commands.config import default_config_file, load_config_from_file

            accelerate_version = accelerate.__version__
            # Get the default from the config file.
            if self._accelerate_config_file is not None or os.path.isfile(default_config_file):
                accelerate_config = load_config_from_file(self._accelerate_config_file).to_dict()

            accelerate_config_str = (
                "\n".join([f"\t- {prop}: {val}" for prop, val in accelerate_config.items()])
                if isinstance(accelerate_config, dict)
                else f"\t{accelerate_config}"
            )

        pt_version = "not installed"
        pt_cuda_available = "NA"
        if is_torch_available():
            import torch

            pt_version = torch.__version__
            pt_cuda_available = torch.cuda.is_available()

        tf_version = "not installed"
        tf_cuda_available = "NA"
        if is_tf_available():
            import tensorflow as tf

            tf_version = tf.__version__
            try:
                # deprecated in v2.1
                tf_cuda_available = tf.test.is_gpu_available()
            except AttributeError:
                # returns list of devices, convert to bool
                tf_cuda_available = bool(tf.config.list_physical_devices("GPU"))

        flax_version = "not installed"
        jax_version = "not installed"
        jaxlib_version = "not installed"
        jax_backend = "NA"
        if is_flax_available():
            import flax
            import jax
            import jaxlib

            flax_version = flax.__version__
            jax_version = jax.__version__
            jaxlib_version = jaxlib.__version__
            jax_backend = jax.lib.xla_bridge.get_backend().platform

        info = {
            "`transformers` version": version,
            "Platform": platform.platform(),
            "Python version": platform.python_version(),
            "Huggingface_hub version": huggingface_hub.__version__,
            "Safetensors version": f"{safetensors_version}",
            "Accelerate version": f"{accelerate_version}",
            "Accelerate config": f"{accelerate_config_str}",
            "PyTorch version (GPU?)": f"{pt_version} ({pt_cuda_available})",
            "Tensorflow version (GPU?)": f"{tf_version} ({tf_cuda_available})",
            "Flax version (CPU?/GPU?/TPU?)": f"{flax_version} ({jax_backend})",
            "Jax version": f"{jax_version}",
            "JaxLib version": f"{jaxlib_version}",
            "Using GPU in script?": "<fill in>",
            "Using distributed or parallel set-up in script?": "<fill in>",
        }

        print("\nCopy-and-paste the text below in your GitHub issue and FILL OUT the two last points.\n")
        print(self.format_dict(info))

        return info
@staticmethod
    def format_dict(d):
        return "\n".join([f"- {prop}: {val}" for prop, val in d.items()]) + "\n"
| 667 |
'''simple docstring'''
import os
import time
import pytest
from datasets.utils.filelock import FileLock, Timeout
def test_filelock(tmpdir):
    lock1 = FileLock(str(tmpdir / "foo.lock"))
    lock2 = FileLock(str(tmpdir / "foo.lock"))
    timeout = 0.01
    with lock1.acquire():
        with pytest.raises(Timeout):
            _start = time.time()
            lock2.acquire(timeout)
            assert time.time() - _start > timeout


def test_long_path(tmpdir):
    filename = "a" * 1000 + ".lock"
    lock1 = FileLock(str(tmpdir / filename))
    assert lock1._lock_file.endswith(".lock")
    assert not lock1._lock_file.endswith(filename)
    assert len(os.path.basename(lock1._lock_file)) <= 255
    lock2 = FileLock(tmpdir / filename)
    with lock1.acquire():
        with pytest.raises(Timeout):
            lock2.acquire(0)
| 667 | 1 |
'''simple docstring'''
import gc
import math
import unittest
import torch
from diffusers import UNetaDModel
from diffusers.utils import floats_tensor, logging, slow, torch_all_close, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
__lowerCamelCase = logging.get_logger(__name__)
enable_full_determinism()
class A__ ( _snake_case , _snake_case , unittest.TestCase ):
lowercase = UNetaDModel
lowercase = "sample"
@property
def snake_case_ ( self ) -> Any:
'''simple docstring'''
A_ = 4
A_ = 3
A_ = (32, 32)
A_ = floats_tensor((batch_size, num_channels) + sizes ).to(UpperCamelCase__ )
A_ = torch.tensor([10] ).to(UpperCamelCase__ )
return {"sample": noise, "timestep": time_step}
@property
def snake_case_ ( self ) -> int:
'''simple docstring'''
return (3, 32, 32)
@property
def snake_case_ ( self ) -> List[str]:
'''simple docstring'''
return (3, 32, 32)
def snake_case_ ( self ) -> Optional[int]:
'''simple docstring'''
A_ = {
"""block_out_channels""": (32, 64),
"""down_block_types""": ("""DownBlock2D""", """AttnDownBlock2D"""),
"""up_block_types""": ("""AttnUpBlock2D""", """UpBlock2D"""),
"""attention_head_dim""": 3,
"""out_channels""": 3,
"""in_channels""": 3,
"""layers_per_block""": 2,
"""sample_size""": 32,
}
A_ = self.dummy_input
return init_dict, inputs_dict
class A__ ( _snake_case , _snake_case , unittest.TestCase ):
lowercase = UNetaDModel
lowercase = "sample"
@property
def snake_case_ ( self ) -> Dict:
'''simple docstring'''
A_ = 4
A_ = 4
A_ = (32, 32)
A_ = floats_tensor((batch_size, num_channels) + sizes ).to(UpperCamelCase__ )
A_ = torch.tensor([10] ).to(UpperCamelCase__ )
return {"sample": noise, "timestep": time_step}
@property
def snake_case_ ( self ) -> str:
'''simple docstring'''
return (4, 32, 32)
@property
def snake_case_ ( self ) -> Tuple:
'''simple docstring'''
return (4, 32, 32)
def snake_case_ ( self ) -> List[Any]:
'''simple docstring'''
A_ = {
"""sample_size""": 32,
"""in_channels""": 4,
"""out_channels""": 4,
"""layers_per_block""": 2,
"""block_out_channels""": (32, 64),
"""attention_head_dim""": 32,
"""down_block_types""": ("""DownBlock2D""", """DownBlock2D"""),
"""up_block_types""": ("""UpBlock2D""", """UpBlock2D"""),
}
A_ = self.dummy_input
return init_dict, inputs_dict
def snake_case_ ( self ) -> str:
'''simple docstring'''
A_ , A_ = UNetaDModel.from_pretrained("""fusing/unet-ldm-dummy-update""" , output_loading_info=UpperCamelCase__ )
self.assertIsNotNone(UpperCamelCase__ )
self.assertEqual(len(loading_info["""missing_keys"""] ) , 0 )
model.to(UpperCamelCase__ )
A_ = model(**self.dummy_input ).sample
assert image is not None, "Make sure output is not None"
@unittest.skipIf(torch_device != """cuda""" , """This test is supposed to run on GPU""" )
def snake_case_ ( self ) -> int:
'''simple docstring'''
A_ , A_ = UNetaDModel.from_pretrained("""fusing/unet-ldm-dummy-update""" , output_loading_info=UpperCamelCase__ )
model.to(UpperCamelCase__ )
A_ = model(**self.dummy_input ).sample
assert image is not None, "Make sure output is not None"
@unittest.skipIf(torch_device != """cuda""" , """This test is supposed to run on GPU""" )
def snake_case_ ( self ) -> List[str]:
'''simple docstring'''
# by default, model loading will use accelerate, as `low_cpu_mem_usage=True`
A_ , A_ = UNetaDModel.from_pretrained("""fusing/unet-ldm-dummy-update""" , output_loading_info=UpperCamelCase__ )
model_accelerate.to(UpperCamelCase__ )
model_accelerate.eval()
A_ = torch.randn(
1 , model_accelerate.config.in_channels , model_accelerate.config.sample_size , model_accelerate.config.sample_size , generator=torch.manual_seed(0 ) , )
A_ = noise.to(UpperCamelCase__ )
A_ = torch.tensor([10] * noise.shape[0] ).to(UpperCamelCase__ )
A_ = model_accelerate(UpperCamelCase__ , UpperCamelCase__ )["""sample"""]
# the two models don't need to stay on the device at the same time
del model_accelerate
torch.cuda.empty_cache()
gc.collect()
A_ , A_ = UNetaDModel.from_pretrained(
"""fusing/unet-ldm-dummy-update""" , output_loading_info=UpperCamelCase__ , low_cpu_mem_usage=UpperCamelCase__ )
model_normal_load.to(UpperCamelCase__ )
model_normal_load.eval()
A_ = model_normal_load(UpperCamelCase__ , UpperCamelCase__ )["""sample"""]
assert torch_all_close(UpperCamelCase__ , UpperCamelCase__ , rtol=1e-3 )
def snake_case_ ( self ) -> Dict:
'''simple docstring'''
A_ = UNetaDModel.from_pretrained("""fusing/unet-ldm-dummy-update""" )
model.eval()
model.to(UpperCamelCase__ )
A_ = torch.randn(
1 , model.config.in_channels , model.config.sample_size , model.config.sample_size , generator=torch.manual_seed(0 ) , )
A_ = noise.to(UpperCamelCase__ )
A_ = torch.tensor([10] * noise.shape[0] ).to(UpperCamelCase__ )
with torch.no_grad():
A_ = model(UpperCamelCase__ , UpperCamelCase__ ).sample
A_ = output[0, -1, -3:, -3:].flatten().cpu()
# fmt: off
A_ = torch.tensor([-13.3258, -20.1100, -15.9873, -17.6617, -23.0596, -17.9419, -13.3675, -16.1889, -12.3800] )
# fmt: on
self.assertTrue(torch_all_close(UpperCamelCase__ , UpperCamelCase__ , rtol=1e-3 ) )
class A__ ( _snake_case , _snake_case , unittest.TestCase ):
lowercase = UNetaDModel
lowercase = "sample"
@property
def snake_case_ ( self , UpperCamelCase__=(32, 32) ) -> Union[str, Any]:
'''simple docstring'''
A_ = 4
A_ = 3
A_ = floats_tensor((batch_size, num_channels) + sizes ).to(UpperCamelCase__ )
A_ = torch.tensor(batch_size * [10] ).to(dtype=torch.intaa , device=UpperCamelCase__ )
return {"sample": noise, "timestep": time_step}
@property
def snake_case_ ( self ) -> int:
'''simple docstring'''
return (3, 32, 32)
@property
def snake_case_ ( self ) -> List[Any]:
'''simple docstring'''
return (3, 32, 32)
def snake_case_ ( self ) -> int:
'''simple docstring'''
A_ = {
"""block_out_channels""": [32, 64, 64, 64],
"""in_channels""": 3,
"""layers_per_block""": 1,
"""out_channels""": 3,
"""time_embedding_type""": """fourier""",
"""norm_eps""": 1e-6,
"""mid_block_scale_factor""": math.sqrt(2.0 ),
"""norm_num_groups""": None,
"""down_block_types""": [
"""SkipDownBlock2D""",
"""AttnSkipDownBlock2D""",
"""SkipDownBlock2D""",
"""SkipDownBlock2D""",
],
"""up_block_types""": [
"""SkipUpBlock2D""",
"""SkipUpBlock2D""",
"""AttnSkipUpBlock2D""",
"""SkipUpBlock2D""",
],
}
A_ = self.dummy_input
return init_dict, inputs_dict
@slow
def snake_case_ ( self ) -> Optional[int]:
'''simple docstring'''
A_ , A_ = UNetaDModel.from_pretrained("""google/ncsnpp-celebahq-256""" , output_loading_info=UpperCamelCase__ )
self.assertIsNotNone(UpperCamelCase__ )
self.assertEqual(len(loading_info["""missing_keys"""] ) , 0 )
model.to(UpperCamelCase__ )
A_ = self.dummy_input
A_ = floats_tensor((4, 3) + (256, 256) ).to(UpperCamelCase__ )
A_ = noise
A_ = model(**UpperCamelCase__ )
assert image is not None, "Make sure output is not None"
@slow
def snake_case_ ( self ) -> List[str]:
'''simple docstring'''
A_ = UNetaDModel.from_pretrained("""google/ncsnpp-celebahq-256""" )
model.to(UpperCamelCase__ )
A_ = 4
A_ = 3
A_ = (256, 256)
A_ = torch.ones((batch_size, num_channels) + sizes ).to(UpperCamelCase__ )
A_ = torch.tensor(batch_size * [1e-4] ).to(UpperCamelCase__ )
with torch.no_grad():
A_ = model(UpperCamelCase__ , UpperCamelCase__ ).sample
A_ = output[0, -3:, -3:, -1].flatten().cpu()
# fmt: off
A_ = torch.tensor([-4842.8691, -6499.6631, -3800.1953, -7978.2686, -10980.7129, -20028.8535, 8148.2822, 2342.2905, 567.7608] )
# fmt: on
self.assertTrue(torch_all_close(UpperCamelCase__ , UpperCamelCase__ , rtol=1e-2 ) )
def snake_case_ ( self ) -> List[str]:
'''simple docstring'''
A_ = UNetaDModel.from_pretrained("""fusing/ncsnpp-ffhq-ve-dummy-update""" )
model.to(UpperCamelCase__ )
A_ = 4
A_ = 3
A_ = (32, 32)
A_ = torch.ones((batch_size, num_channels) + sizes ).to(UpperCamelCase__ )
A_ = torch.tensor(batch_size * [1e-4] ).to(UpperCamelCase__ )
with torch.no_grad():
A_ = model(UpperCamelCase__ , UpperCamelCase__ ).sample
A_ = output[0, -3:, -3:, -1].flatten().cpu()
# fmt: off
A_ = torch.tensor([-0.0325, -0.0900, -0.0869, -0.0332, -0.0725, -0.0270, -0.0101, 0.0227, 0.0256] )
# fmt: on
self.assertTrue(torch_all_close(UpperCamelCase__ , UpperCamelCase__ , rtol=1e-2 ) )
def snake_case_ ( self ) -> int:
'''simple docstring'''
# not required for this model
pass
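# Minimal forward-pass sketch matching the dummy config above (public
# diffusers API; the model class appears under a mangled name in this file):
#
#     import torch
#     from diffusers import UNet2DModel
#
#     model = UNet2DModel(
#         sample_size=32,
#         in_channels=3,
#         out_channels=3,
#         layers_per_block=2,
#         block_out_channels=(32, 64),
#         down_block_types=("DownBlock2D", "AttnDownBlock2D"),
#         up_block_types=("AttnUpBlock2D", "UpBlock2D"),
#     )
#     noise = torch.randn(1, 3, 32, 32)
#     out = model(noise, timestep=10).sample  # same shape as the input noise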
| 667 |
'''simple docstring'''
from dataclasses import dataclass
from typing import Optional
import numpy as np
import torch
import torch.nn as nn
from ..utils import BaseOutput, is_torch_version, randn_tensor
from .attention_processor import SpatialNorm
from .unet_ad_blocks import UNetMidBlockaD, get_down_block, get_up_block
@dataclass
class A__ ( _snake_case ):
lowercase = 42
class A__ ( nn.Module ):
def __init__( self , UpperCamelCase__=3 , UpperCamelCase__=3 , UpperCamelCase__=("DownEncoderBlock2D",) , UpperCamelCase__=(64,) , UpperCamelCase__=2 , UpperCamelCase__=32 , UpperCamelCase__="silu" , UpperCamelCase__=True , ) -> Union[str, Any]:
'''simple docstring'''
super().__init__()
A_ = layers_per_block
A_ = torch.nn.Convad(
UpperCamelCase__ , block_out_channels[0] , kernel_size=3 , stride=1 , padding=1 , )
A_ = None
A_ = nn.ModuleList([] )
# down
A_ = block_out_channels[0]
for i, down_block_type in enumerate(UpperCamelCase__ ):
A_ = output_channel
A_ = block_out_channels[i]
A_ = i == len(UpperCamelCase__ ) - 1
A_ = get_down_block(
UpperCamelCase__ , num_layers=self.layers_per_block , in_channels=UpperCamelCase__ , out_channels=UpperCamelCase__ , add_downsample=not is_final_block , resnet_eps=1e-6 , downsample_padding=0 , resnet_act_fn=UpperCamelCase__ , resnet_groups=UpperCamelCase__ , attention_head_dim=UpperCamelCase__ , temb_channels=UpperCamelCase__ , )
self.down_blocks.append(UpperCamelCase__ )
# mid
A_ = UNetMidBlockaD(
in_channels=block_out_channels[-1] , resnet_eps=1e-6 , resnet_act_fn=UpperCamelCase__ , output_scale_factor=1 , resnet_time_scale_shift="""default""" , attention_head_dim=block_out_channels[-1] , resnet_groups=UpperCamelCase__ , temb_channels=UpperCamelCase__ , )
# out
A_ = nn.GroupNorm(num_channels=block_out_channels[-1] , num_groups=UpperCamelCase__ , eps=1e-6 )
A_ = nn.SiLU()
A_ = 2 * out_channels if double_z else out_channels
A_ = nn.Convad(block_out_channels[-1] , UpperCamelCase__ , 3 , padding=1 )
A_ = False
def snake_case_ ( self , UpperCamelCase__ ) -> str:
'''simple docstring'''
A_ = x
A_ = self.conv_in(UpperCamelCase__ )
if self.training and self.gradient_checkpointing:
def create_custom_forward(UpperCamelCase__ ):
def custom_forward(*UpperCamelCase__ ):
return module(*UpperCamelCase__ )
return custom_forward
# down
if is_torch_version(""">=""" , """1.11.0""" ):
for down_block in self.down_blocks:
A_ = torch.utils.checkpoint.checkpoint(
create_custom_forward(UpperCamelCase__ ) , UpperCamelCase__ , use_reentrant=UpperCamelCase__ )
# middle
A_ = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) , UpperCamelCase__ , use_reentrant=UpperCamelCase__ )
else:
for down_block in self.down_blocks:
A_ = torch.utils.checkpoint.checkpoint(create_custom_forward(UpperCamelCase__ ) , UpperCamelCase__ )
# middle
A_ = torch.utils.checkpoint.checkpoint(create_custom_forward(self.mid_block ) , UpperCamelCase__ )
else:
# down
for down_block in self.down_blocks:
A_ = down_block(UpperCamelCase__ )
# middle
A_ = self.mid_block(UpperCamelCase__ )
# post-process
A_ = self.conv_norm_out(UpperCamelCase__ )
A_ = self.conv_act(UpperCamelCase__ )
A_ = self.conv_out(UpperCamelCase__ )
return sample
class A__ ( nn.Module ):
def __init__( self , UpperCamelCase__=3 , UpperCamelCase__=3 , UpperCamelCase__=("UpDecoderBlock2D",) , UpperCamelCase__=(64,) , UpperCamelCase__=2 , UpperCamelCase__=32 , UpperCamelCase__="silu" , UpperCamelCase__="group" , ) -> List[Any]:
'''simple docstring'''
super().__init__()
A_ = layers_per_block
A_ = nn.Convad(
UpperCamelCase__ , block_out_channels[-1] , kernel_size=3 , stride=1 , padding=1 , )
A_ = None
A_ = nn.ModuleList([] )
A_ = in_channels if norm_type == """spatial""" else None
# mid
A_ = UNetMidBlockaD(
in_channels=block_out_channels[-1] , resnet_eps=1e-6 , resnet_act_fn=UpperCamelCase__ , output_scale_factor=1 , resnet_time_scale_shift="""default""" if norm_type == """group""" else norm_type , attention_head_dim=block_out_channels[-1] , resnet_groups=UpperCamelCase__ , temb_channels=UpperCamelCase__ , )
# up
A_ = list(reversed(UpperCamelCase__ ) )
A_ = reversed_block_out_channels[0]
for i, up_block_type in enumerate(UpperCamelCase__ ):
A_ = output_channel
A_ = reversed_block_out_channels[i]
A_ = i == len(UpperCamelCase__ ) - 1
A_ = get_up_block(
UpperCamelCase__ , num_layers=self.layers_per_block + 1 , in_channels=UpperCamelCase__ , out_channels=UpperCamelCase__ , prev_output_channel=UpperCamelCase__ , add_upsample=not is_final_block , resnet_eps=1e-6 , resnet_act_fn=UpperCamelCase__ , resnet_groups=UpperCamelCase__ , attention_head_dim=UpperCamelCase__ , temb_channels=UpperCamelCase__ , resnet_time_scale_shift=UpperCamelCase__ , )
self.up_blocks.append(UpperCamelCase__ )
A_ = output_channel
# out
if norm_type == "spatial":
A_ = SpatialNorm(block_out_channels[0] , UpperCamelCase__ )
else:
A_ = nn.GroupNorm(num_channels=block_out_channels[0] , num_groups=UpperCamelCase__ , eps=1e-6 )
A_ = nn.SiLU()
A_ = nn.Convad(block_out_channels[0] , UpperCamelCase__ , 3 , padding=1 )
A_ = False
def snake_case_ ( self , UpperCamelCase__ , UpperCamelCase__=None ) -> Optional[Any]:
'''simple docstring'''
A_ = z
A_ = self.conv_in(UpperCamelCase__ )
A_ = next(iter(self.up_blocks.parameters() ) ).dtype
if self.training and self.gradient_checkpointing:
def create_custom_forward(UpperCamelCase__ ):
def custom_forward(*UpperCamelCase__ ):
return module(*UpperCamelCase__ )
return custom_forward
if is_torch_version(""">=""" , """1.11.0""" ):
# middle
A_ = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) , UpperCamelCase__ , UpperCamelCase__ , use_reentrant=UpperCamelCase__ )
A_ = sample.to(UpperCamelCase__ )
# up
for up_block in self.up_blocks:
A_ = torch.utils.checkpoint.checkpoint(
create_custom_forward(UpperCamelCase__ ) , UpperCamelCase__ , UpperCamelCase__ , use_reentrant=UpperCamelCase__ )
else:
# middle
A_ = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) , UpperCamelCase__ , UpperCamelCase__ )
A_ = sample.to(UpperCamelCase__ )
# up
for up_block in self.up_blocks:
A_ = torch.utils.checkpoint.checkpoint(create_custom_forward(UpperCamelCase__ ) , UpperCamelCase__ , UpperCamelCase__ )
else:
# middle
A_ = self.mid_block(UpperCamelCase__ , UpperCamelCase__ )
A_ = sample.to(UpperCamelCase__ )
# up
for up_block in self.up_blocks:
A_ = up_block(UpperCamelCase__ , UpperCamelCase__ )
# post-process
if latent_embeds is None:
A_ = self.conv_norm_out(UpperCamelCase__ )
else:
A_ = self.conv_norm_out(UpperCamelCase__ , UpperCamelCase__ )
A_ = self.conv_act(UpperCamelCase__ )
A_ = self.conv_out(UpperCamelCase__ )
return sample
class A__ ( nn.Module ):
def __init__( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__=None , UpperCamelCase__="random" , UpperCamelCase__=False , UpperCamelCase__=True ) -> str:
'''simple docstring'''
super().__init__()
A_ = n_e
A_ = vq_embed_dim
A_ = beta
A_ = legacy
A_ = nn.Embedding(self.n_e , self.vq_embed_dim )
self.embedding.weight.data.uniform_(-1.0 / self.n_e , 1.0 / self.n_e )
A_ = remap
if self.remap is not None:
self.register_buffer("""used""" , torch.tensor(np.load(self.remap ) ) )
A_ = self.used.shape[0]
A_ = unknown_index # "random" or "extra" or integer
if self.unknown_index == "extra":
A_ = self.re_embed
A_ = self.re_embed + 1
print(
f'''Remapping {self.n_e} indices to {self.re_embed} indices. '''
f'''Using {self.unknown_index} for unknown indices.''' )
else:
A_ = n_e
A_ = sane_index_shape
def snake_case_ ( self , UpperCamelCase__ ) -> Optional[Any]:
'''simple docstring'''
A_ = inds.shape
assert len(UpperCamelCase__ ) > 1
A_ = inds.reshape(ishape[0] , -1 )
A_ = self.used.to(UpperCamelCase__ )
A_ = (inds[:, :, None] == used[None, None, ...]).long()
A_ = match.argmax(-1 )
A_ = match.sum(2 ) < 1
if self.unknown_index == "random":
A_ = torch.randint(0 , self.re_embed , size=new[unknown].shape ).to(device=new.device )
else:
A_ = self.unknown_index
return new.reshape(UpperCamelCase__ )
def snake_case_ ( self , UpperCamelCase__ ) -> Dict:
'''simple docstring'''
A_ = inds.shape
assert len(UpperCamelCase__ ) > 1
A_ = inds.reshape(ishape[0] , -1 )
A_ = self.used.to(UpperCamelCase__ )
if self.re_embed > self.used.shape[0]: # extra token
A_ = 0 # simply set to zero
A_ = torch.gather(used[None, :][inds.shape[0] * [0], :] , 1 , UpperCamelCase__ )
return back.reshape(UpperCamelCase__ )
def snake_case_ ( self , UpperCamelCase__ ) -> str:
'''simple docstring'''
# reshape z -> (batch, height, width, channel) and flatten
A_ = z.permute(0 , 2 , 3 , 1 ).contiguous()
A_ = z.view(-1 , self.vq_embed_dim )
# distances from z to embeddings e_j (z - e)^2 = z^2 + e^2 - 2 e * z
A_ = torch.argmin(torch.cdist(UpperCamelCase__ , self.embedding.weight ) , dim=1 )
A_ = self.embedding(UpperCamelCase__ ).view(z.shape )
A_ = None
A_ = None
# compute loss for embedding
if not self.legacy:
A_ = self.beta * torch.mean((z_q.detach() - z) ** 2 ) + torch.mean((z_q - z.detach()) ** 2 )
else:
A_ = torch.mean((z_q.detach() - z) ** 2 ) + self.beta * torch.mean((z_q - z.detach()) ** 2 )
# preserve gradients
A_ = z + (z_q - z).detach()
# reshape back to match original input shape
A_ = z_q.permute(0 , 3 , 1 , 2 ).contiguous()
if self.remap is not None:
A_ = min_encoding_indices.reshape(z.shape[0] , -1 ) # add batch axis
A_ = self.remap_to_used(UpperCamelCase__ )
A_ = min_encoding_indices.reshape(-1 , 1 ) # flatten
if self.sane_index_shape:
A_ = min_encoding_indices.reshape(z_q.shape[0] , z_q.shape[2] , z_q.shape[3] )
return z_q, loss, (perplexity, min_encodings, min_encoding_indices)
def snake_case_ ( self , indices , shape ) -> Optional[int]:
'''simple docstring'''
# shape specifying (batch, height, width, channel)
if self.remap is not None:
A_ = indices.reshape(shape[0] , -1 ) # add batch axis
A_ = self.unmap_to_all(UpperCamelCase__ )
A_ = indices.reshape(-1 ) # flatten again
# get quantized latent vectors
A_ = self.embedding(UpperCamelCase__ )
if shape is not None:
A_ = z_q.view(UpperCamelCase__ )
# reshape back to match original input shape
A_ = z_q.permute(0 , 3 , 1 , 2 ).contiguous()
return z_q
class A__ ( _snake_case ):
def __init__( self , parameters , deterministic=False ) -> None:
'''simple docstring'''
A_ = parameters
A_ , A_ = torch.chunk(UpperCamelCase__ , 2 , dim=1 )
A_ = torch.clamp(self.logvar , -30.0 , 20.0 )
A_ = deterministic
A_ = torch.exp(0.5 * self.logvar )
A_ = torch.exp(self.logvar )
if self.deterministic:
A_ = A_ = torch.zeros_like(
self.mean , device=self.parameters.device , dtype=self.parameters.dtype )
def snake_case_ ( self , UpperCamelCase__ = None ) -> torch.FloatTensor:
'''simple docstring'''
# make sure sample is on the same device as the parameters and has same dtype
A_ = randn_tensor(
self.mean.shape , generator=UpperCamelCase__ , device=self.parameters.device , dtype=self.parameters.dtype )
A_ = self.mean + self.std * sample
return x
def snake_case_ ( self , UpperCamelCase__=None ) -> int:
'''simple docstring'''
if self.deterministic:
return torch.Tensor([0.0] )
else:
if other is None:
return 0.5 * torch.sum(torch.pow(self.mean , 2 ) + self.var - 1.0 - self.logvar , dim=[1, 2, 3] )
else:
return 0.5 * torch.sum(
torch.pow(self.mean - other.mean , 2 ) / other.var
+ self.var / other.var
- 1.0
- self.logvar
+ other.logvar , dim=[1, 2, 3] , )
def snake_case_ ( self , sample , dims=[1, 2, 3] ) -> Optional[Any]:
'''simple docstring'''
if self.deterministic:
return torch.Tensor([0.0] )
A_ = np.log(2.0 * np.pi )
return 0.5 * torch.sum(logtwopi + self.logvar + torch.pow(sample - self.mean , 2 ) / self.var , dim=UpperCamelCase__ )
def snake_case_ ( self ) -> List[Any]:
'''simple docstring'''
return self.mean
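# A minimal, self-contained sketch of the reparameterization and KL terms the
# DiagonalGaussianDistribution above implements; the variable names and the
# (2, 8, 4, 4) shape are illustrative assumptions, not part of the original API.
import torch

def sample_diagonal_gaussian(parameters: torch.Tensor):
    # `parameters` packs mean and log-variance along the channel dimension
    mean, logvar = torch.chunk(parameters, 2, dim=1)
    logvar = torch.clamp(logvar, -30.0, 20.0)
    std = torch.exp(0.5 * logvar)
    sample = mean + std * torch.randn_like(mean)  # reparameterization trick
    kl = 0.5 * torch.sum(mean.pow(2) + logvar.exp() - 1.0 - logvar, dim=[1, 2, 3])
    return sample, kl

params = torch.randn(2, 8, 4, 4)  # (batch, 2 * latent_channels, height, width)
z, kl = sample_diagonal_gaussian(params)
print(z.shape, kl.shape)  # torch.Size([2, 4, 4, 4]) torch.Size([2])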
| 667 | 1 |
'''simple docstring'''
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class A__ ( _snake_case ):
@staticmethod
@abstractmethod
def snake_case_ ( UpperCamelCase__ ) -> List[str]:
'''simple docstring'''
raise NotImplementedError()
@abstractmethod
def snake_case_ ( self ) -> Optional[Any]:
'''simple docstring'''
raise NotImplementedError()
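# Hedged sketch of how the abstract command above is typically subclassed and
# wired into argparse; `EchoCommand` and its argument are illustrative
# assumptions, not taken from the source.
from abc import ABC, abstractmethod
from argparse import ArgumentParser

class BaseCommand(ABC):
    @staticmethod
    @abstractmethod
    def register_subcommand(subparsers):
        raise NotImplementedError()

    @abstractmethod
    def run(self):
        raise NotImplementedError()

class EchoCommand(BaseCommand):
    @staticmethod
    def register_subcommand(subparsers):
        parser = subparsers.add_parser("echo")
        parser.add_argument("text")
        parser.set_defaults(factory=lambda args: EchoCommand(args.text))

    def __init__(self, text):
        self.text = text

    def run(self):
        print(self.text)

cli = ArgumentParser("tool")
EchoCommand.register_subcommand(cli.add_subparsers())
args = cli.parse_args(["echo", "hello"])
args.factory(args).run()  # prints: hello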
| 667 |
'''simple docstring'''
import argparse
import json
import os
from collections import OrderedDict
import torch
from transformers import LukeConfig, LukeForMaskedLM, MLukeTokenizer, XLMRobertaTokenizer
from transformers.tokenization_utils_base import AddedToken
@torch.no_grad()
def convert_luke_checkpoint( checkpoint_path, metadata_path, entity_vocab_path, pytorch_dump_folder_path, model_size ) -> List[Any]:
# Load configuration defined in the metadata file
with open(UpperCAmelCase__ ) as metadata_file:
A_ = json.load(UpperCAmelCase__ )
A_ = LukeConfig(use_entity_aware_attention=UpperCAmelCase__, **metadata["""model_config"""] )
# Load in the weights from the checkpoint_path
A_ = torch.load(UpperCAmelCase__, map_location="""cpu""" )["""module"""]
# Load the entity vocab file
A_ = load_original_entity_vocab(UpperCAmelCase__ )
# add an entry for [MASK2]
A_ = max(entity_vocab.values() ) + 1
config.entity_vocab_size += 1
A_ = XLMRobertaTokenizer.from_pretrained(metadata["""model_config"""]["""bert_model_name"""] )
# Add special tokens to the token vocabulary for downstream tasks
A_ = AddedToken("""<ent>""", lstrip=UpperCAmelCase__, rstrip=UpperCAmelCase__ )
A_ = AddedToken("""<ent2>""", lstrip=UpperCAmelCase__, rstrip=UpperCAmelCase__ )
tokenizer.add_special_tokens({"""additional_special_tokens""": [entity_token_a, entity_token_a]} )
config.vocab_size += 2
print(F'''Saving tokenizer to {pytorch_dump_folder_path}''' )
tokenizer.save_pretrained(UpperCAmelCase__ )
with open(os.path.join(UpperCAmelCase__, """tokenizer_config.json""" ), """r""" ) as f:
A_ = json.load(UpperCAmelCase__ )
A_ = """MLukeTokenizer"""
with open(os.path.join(UpperCAmelCase__, """tokenizer_config.json""" ), """w""" ) as f:
json.dump(UpperCAmelCase__, UpperCAmelCase__ )
with open(os.path.join(UpperCAmelCase__, MLukeTokenizer.vocab_files_names["""entity_vocab_file"""] ), """w""" ) as f:
json.dump(UpperCAmelCase__, UpperCAmelCase__ )
A_ = MLukeTokenizer.from_pretrained(UpperCAmelCase__ )
# Initialize the embeddings of the special tokens
A_ = tokenizer.convert_tokens_to_ids(["""@"""] )[0]
A_ = tokenizer.convert_tokens_to_ids(["""#"""] )[0]
A_ = state_dict["""embeddings.word_embeddings.weight"""]
A_ = word_emb[ent_init_index].unsqueeze(0 )
A_ = word_emb[enta_init_index].unsqueeze(0 )
A_ = torch.cat([word_emb, ent_emb, enta_emb] )
# add special tokens for 'entity_predictions.bias'
for bias_name in ["lm_head.decoder.bias", "lm_head.bias"]:
A_ = state_dict[bias_name]
A_ = decoder_bias[ent_init_index].unsqueeze(0 )
A_ = decoder_bias[enta_init_index].unsqueeze(0 )
A_ = torch.cat([decoder_bias, ent_decoder_bias, enta_decoder_bias] )
# Initialize the query layers of the entity-aware self-attention mechanism
for layer_index in range(config.num_hidden_layers ):
for matrix_name in ["query.weight", "query.bias"]:
A_ = F'''encoder.layer.{layer_index}.attention.self.'''
A_ = state_dict[prefix + matrix_name]
A_ = state_dict[prefix + matrix_name]
A_ = state_dict[prefix + matrix_name]
# Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
A_ = state_dict["""entity_embeddings.entity_embeddings.weight"""]
A_ = entity_emb[entity_vocab["""[MASK]"""]].unsqueeze(0 )
A_ = torch.cat([entity_emb, entity_mask_emb] )
# add [MASK2] for 'entity_predictions.bias'
A_ = state_dict["""entity_predictions.bias"""]
A_ = entity_prediction_bias[entity_vocab["""[MASK]"""]].unsqueeze(0 )
A_ = torch.cat([entity_prediction_bias, entity_mask_bias] )
A_ = LukeForMaskedLM(config=UpperCAmelCase__ ).eval()
state_dict.pop("""entity_predictions.decoder.weight""" )
state_dict.pop("""lm_head.decoder.weight""" )
state_dict.pop("""lm_head.decoder.bias""" )
A_ = OrderedDict()
for key, value in state_dict.items():
if not (key.startswith("""lm_head""" ) or key.startswith("""entity_predictions""" )):
A_ = state_dict[key]  # the upstream conversion script stores this under the "luke."-prefixed key
else:
A_ = state_dict[key]
A_ , A_ = model.load_state_dict(UpperCAmelCase__, strict=UpperCAmelCase__ )
if set(UpperCAmelCase__ ) != {"luke.embeddings.position_ids"}:
raise ValueError(F'''Unexpected unexpected_keys: {unexpected_keys}''' )
if set(UpperCAmelCase__ ) != {
"lm_head.decoder.weight",
"lm_head.decoder.bias",
"entity_predictions.decoder.weight",
}:
raise ValueError(F'''Unexpected missing_keys: {missing_keys}''' )
model.tie_weights()
assert (model.luke.embeddings.word_embeddings.weight == model.lm_head.decoder.weight).all()
assert (model.luke.entity_embeddings.entity_embeddings.weight == model.entity_predictions.decoder.weight).all()
# Check outputs
A_ = MLukeTokenizer.from_pretrained(UpperCAmelCase__, task="""entity_classification""" )
A_ = """ISO 639-3 uses the code fas for the dialects spoken across Iran and アフガニスタン (Afghanistan)."""
A_ = (0, 9)
A_ = tokenizer(UpperCAmelCase__, entity_spans=[span], return_tensors="""pt""" )
A_ = model(**UpperCAmelCase__ )
# Verify word hidden states
if model_size == "large":
raise NotImplementedError
else: # base
A_ = torch.Size((1, 33, 7_68) )
A_ = torch.tensor([[0.0_892, 0.0_596, -0.2_819], [0.0_134, 0.1_199, 0.0_573], [-0.0_169, 0.0_927, 0.0_644]] )
if not (outputs.last_hidden_state.shape == expected_shape):
raise ValueError(
F'''Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}''' )
if not torch.allclose(outputs.last_hidden_state[0, :3, :3], UpperCAmelCase__, atol=1e-4 ):
raise ValueError
# Verify entity hidden states
if model_size == "large":
raise NotImplementedError
else: # base
A_ = torch.Size((1, 1, 7_68) )
A_ = torch.tensor([[-0.1_482, 0.0_609, 0.0_322]] )
if not (outputs.entity_last_hidden_state.shape == expected_shape):
raise ValueError(
F'''Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is'''
F''' {expected_shape}''' )
if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3], UpperCAmelCase__, atol=1e-4 ):
raise ValueError
# Verify masked word/entity prediction
A_ = MLukeTokenizer.from_pretrained(UpperCAmelCase__ )
A_ = """Tokyo is the capital of <mask>."""
A_ = (24, 30)
A_ = tokenizer(UpperCAmelCase__, entity_spans=[span], return_tensors="""pt""" )
A_ = model(**UpperCAmelCase__ )
A_ = encoding["""input_ids"""][0].tolist()
A_ = input_ids.index(tokenizer.convert_tokens_to_ids("""<mask>""" ) )
A_ = outputs.logits[0][mask_position_id].argmax(dim=-1 )
assert "Japan" == tokenizer.decode(UpperCAmelCase__ )
A_ = outputs.entity_logits[0][0].argmax().item()
A_ = [
entity for entity, entity_id in tokenizer.entity_vocab.items() if entity_id == predicted_entity_id
]
assert [e for e in multilingual_predicted_entities if e.startswith("""en:""" )][0] == "en:Japan"
# Finally, save our PyTorch model and tokenizer
print("""Saving PyTorch model to {}""".format(UpperCAmelCase__ ) )
model.save_pretrained(UpperCAmelCase__ )
def load_original_entity_vocab( entity_vocab_path ) -> int:
A_ = ["""[MASK]""", """[PAD]""", """[UNK]"""]
A_ = [json.loads(line ) for line in open(entity_vocab_path )]
A_ = {}
for entry in data:
A_ = entry["""id"""]
for entity_name, language in entry["entities"]:
if entity_name in SPECIAL_TOKENS:
A_ = entity_id
break
A_ = F'''{language}:{entity_name}'''
A_ = entity_id
return new_mapping
if __name__ == "__main__":
__lowerCamelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''--checkpoint_path''', type=str, help='''Path to a pytorch_model.bin file.''')
parser.add_argument(
'''--metadata_path''', default=None, type=str, help='''Path to a metadata.json file, defining the configuration.'''
)
parser.add_argument(
'''--entity_vocab_path''',
default=None,
type=str,
help='''Path to an entity_vocab.tsv file, containing the entity vocabulary.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to where to dump the output PyTorch model.'''
)
parser.add_argument(
'''--model_size''', default='''base''', type=str, choices=['''base''', '''large'''], help='''Size of the model to be converted.'''
)
__lowerCamelCase = parser.parse_args()
convert_luke_checkpoint(
args.checkpoint_path,
args.metadata_path,
args.entity_vocab_path,
args.pytorch_dump_folder_path,
args.model_size,
)
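# Isolated sketch of the embedding-resizing trick the conversion above relies
# on when special tokens are appended: new rows are initialized from existing
# rows and concatenated. The sizes and row indices are illustrative assumptions.
import torch

word_emb = torch.randn(32000, 768)        # (vocab_size, hidden_size)
ent_row = word_emb[100].unsqueeze(0)      # reuse an existing row, e.g. "@"
ent2_row = word_emb[101].unsqueeze(0)     # e.g. "#"
resized = torch.cat([word_emb, ent_row, ent2_row], dim=0)
assert resized.shape == (32002, 768)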
| 667 | 1 |
'''simple docstring'''
def bead_sort( sequence ) -> list:
    if any(not isinstance(x, int ) or x < 0 for x in sequence ):
        raise TypeError("""Sequence must be list of non-negative integers""" )
    for _ in range(len(sequence ) ):
        for i, (rod_upper, rod_lower) in enumerate(zip(sequence, sequence[1:] ) ):
            if rod_upper > rod_lower:
                sequence[i] -= rod_upper - rod_lower
                sequence[i + 1] += rod_upper - rod_lower
    return sequence
if __name__ == "__main__":
assert bead_sort([5, 4, 3, 2, 1]) == [1, 2, 3, 4, 5]
assert bead_sort([7, 9, 4, 3, 5]) == [3, 4, 5, 7, 9]
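# An equivalent "beads under gravity" view of the algorithm above, added as a
# self-contained sketch for intuition; it is not part of the original module.
def bead_sort_gravity(sequence):
    if not sequence:
        return []
    counts = [0] * max(sequence)
    for value in sequence:
        for level in range(value):
            counts[level] += 1  # drop `value` beads, one per level
    # column heights after gravity give the values in descending order
    descending = [sum(1 for c in counts if c > i) for i in range(len(sequence))]
    return descending[::-1]

assert bead_sort_gravity([5, 4, 3, 2, 1]) == [1, 2, 3, 4, 5]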
| 667 |
'''simple docstring'''
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class A__ ( _snake_case ):
lowercase = "ClapFeatureExtractor"
lowercase = ("RobertaTokenizer", "RobertaTokenizerFast")
def __init__( self , feature_extractor , tokenizer ) -> None:
'''simple docstring'''
super().__init__(UpperCamelCase__ , UpperCamelCase__ )
def __call__( self , text=None , audios=None , return_tensors=None , **kwargs ) -> Tuple:
'''simple docstring'''
A_ = kwargs.pop("""sampling_rate""" , UpperCamelCase__ )
if text is None and audios is None:
raise ValueError("""You have to specify either text or audios. Both cannot be none.""" )
if text is not None:
A_ = self.tokenizer(UpperCamelCase__ , return_tensors=UpperCamelCase__ , **UpperCamelCase__ )
if audios is not None:
A_ = self.feature_extractor(
UpperCamelCase__ , sampling_rate=UpperCamelCase__ , return_tensors=UpperCamelCase__ , **UpperCamelCase__ )
if text is not None and audios is not None:
A_ = audio_features.input_features
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**UpperCamelCase__ ) , tensor_type=UpperCamelCase__ )
def snake_case_ ( self , *args , **kwargs ) -> Optional[int]:
'''simple docstring'''
return self.tokenizer.batch_decode(*args , **kwargs )
def snake_case_ ( self , *args , **kwargs ) -> str:
'''simple docstring'''
return self.tokenizer.decode(*args , **kwargs )
@property
def snake_case_ ( self ) -> int:
'''simple docstring'''
A_ = self.tokenizer.model_input_names
A_ = self.feature_extractor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + feature_extractor_input_names ) )
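# Hedged usage sketch for the processor above; the checkpoint id and the
# 48 kHz sampling rate are illustrative assumptions (this downloads weights).
import numpy as np
from transformers import ClapProcessor

processor = ClapProcessor.from_pretrained("laion/clap-htsat-unfused")
audio = np.random.randn(48000).astype("float32")  # one second of dummy audio
inputs = processor(text=["a dog barking"], audios=[audio],
                   sampling_rate=48000, return_tensors="pt")
print(sorted(inputs.keys()))  # token ids/mask plus the audio input_features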
| 667 | 1 |
'''simple docstring'''
from __future__ import annotations
def generate_all_permutations( sequence ) -> None:
    create_state_space_tree(sequence, [], 0, [0 for i in range(len(sequence ) )] )
def create_state_space_tree( sequence, current_sequence, index, index_used ) -> None:
    if index == len(sequence ):
        print(current_sequence )
        return
    for i in range(len(sequence ) ):
        if not index_used[i]:
            current_sequence.append(sequence[i] )
            index_used[i] = True
            create_state_space_tree(sequence, current_sequence, index + 1, index_used )
            current_sequence.pop()
            index_used[i] = False
__lowerCamelCase = [3, 1, 2, 4]
generate_all_permutations(sequence)
__lowerCamelCase = ["A", "B", "C"]
generate_all_permutations(sequence_a)
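# A collecting variant of the backtracking above, cross-checked against
# itertools.permutations; a verification sketch, not part of the original module.
from itertools import permutations

def all_permutations(sequence):
    results = []

    def backtrack(current, used):
        if len(current) == len(sequence):
            results.append(tuple(current))
            return
        for i, value in enumerate(sequence):
            if not used[i]:
                used[i] = True
                current.append(value)
                backtrack(current, used)
                current.pop()
                used[i] = False

    backtrack([], [False] * len(sequence))
    return results

assert sorted(all_permutations([3, 1, 2, 4])) == sorted(permutations([3, 1, 2, 4]))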
| 667 |
'''simple docstring'''
import numpy as np
from cva import COLOR_BGR2GRAY, cvtColor, imread
from numpy import array, uinta
from PIL import Image
from digital_image_processing import change_contrast as cc
from digital_image_processing import convert_to_negative as cn
from digital_image_processing import sepia as sp
from digital_image_processing.dithering import burkes as bs
from digital_image_processing.edge_detection import canny
from digital_image_processing.filters import convolve as conv
from digital_image_processing.filters import gaussian_filter as gg
from digital_image_processing.filters import local_binary_pattern as lbp
from digital_image_processing.filters import median_filter as med
from digital_image_processing.filters import sobel_filter as sob
from digital_image_processing.resize import resize as rs
__lowerCamelCase = imread(r'''digital_image_processing/image_data/lena_small.jpg''')
__lowerCamelCase = cvtColor(img, COLOR_BGR2GRAY)
def UpperCAmelCase__ ( ) -> Dict:
A_ = cn.convert_to_negative(UpperCAmelCase__ )
# assert negative_img array for at least one True
assert negative_img.any()
def UpperCAmelCase__ ( ) -> List[Any]:
with Image.open("""digital_image_processing/image_data/lena_small.jpg""" ) as img:
# Work around assertion for response
assert str(cc.change_contrast(UpperCAmelCase__, 1_10 ) ).startswith(
"""<PIL.Image.Image image mode=RGB size=100x100 at""" )
def UpperCAmelCase__ ( ) -> str:
A_ = canny.gen_gaussian_kernel(9, sigma=1.4 )
# Assert ambiguous array
assert resp.all()
def UpperCAmelCase__ ( ) -> Union[str, Any]:
A_ = imread("""digital_image_processing/image_data/lena_small.jpg""", 0 )
# assert ambiguous array for all == True
assert canny_img.all()
A_ = canny.canny(UpperCAmelCase__ )
# assert canny array for at least one True
assert canny_array.any()
def UpperCAmelCase__ ( ) -> Dict:
assert gg.gaussian_filter(UpperCAmelCase__, 5, sigma=0.9 ).all()
def UpperCAmelCase__ ( ) -> int:
# laplace diagonals
A_ = array([[0.25, 0.5, 0.25], [0.5, -3, 0.5], [0.25, 0.5, 0.25]] )
A_ = conv.img_convolve(UpperCAmelCase__, UpperCAmelCase__ ).astype(UpperCAmelCase__ )
assert res.any()
def UpperCAmelCase__ ( ) -> List[Any]:
assert med.median_filter(UpperCAmelCase__, 3 ).any()
def UpperCAmelCase__ ( ) -> List[Any]:
A_ , A_ = sob.sobel_filter(UpperCAmelCase__ )
assert grad.any() and theta.any()
def UpperCAmelCase__ ( ) -> List[str]:
A_ = sp.make_sepia(UpperCAmelCase__, 20 )
assert sepia.all()
def UpperCAmelCase__ ( UpperCAmelCase__ = "digital_image_processing/image_data/lena_small.jpg" ) -> List[Any]:
A_ = bs.Burkes(imread(UpperCAmelCase__, 1 ), 1_20 )
burkes.process()
assert burkes.output_img.any()
def UpperCAmelCase__ ( UpperCAmelCase__ = "digital_image_processing/image_data/lena_small.jpg", ) -> Optional[int]:
A_ = rs.NearestNeighbour(imread(UpperCAmelCase__, 1 ), 4_00, 2_00 )
nn.process()
assert nn.output.any()
def UpperCAmelCase__ ( ) -> Optional[int]:
A_ = """digital_image_processing/image_data/lena.jpg"""
# Reading the image and converting it to grayscale.
A_ = imread(UpperCAmelCase__, 0 )
# Test for get_neighbors_pixel function() return not None
A_ = 0
A_ = 0
A_ = image[x_coordinate][y_coordinate]
A_ = lbp.get_neighbors_pixel(
UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__ )
assert neighbors_pixels is not None
# Test for local_binary_pattern function()
# Create a numpy array as the same height and width of read image
A_ = np.zeros((image.shape[0], image.shape[1]) )
# Iterating through the image and calculating the local binary pattern value
# for each pixel.
for i in range(0, image.shape[0] ):
for j in range(0, image.shape[1] ):
A_ = lbp.local_binary_value(UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__ )
assert lbp_image.any()
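# Reference sketch of the 2D convolution exercised by the filter tests above,
# assuming zero padding and odd kernel sizes; it mirrors what `img_convolve`
# is expected to compute but is not the library implementation.
import numpy as np

def convolve2d(image: np.ndarray, kernel: np.ndarray) -> np.ndarray:
    kh, kw = kernel.shape
    padded = np.pad(image, ((kh // 2, kh // 2), (kw // 2, kw // 2)), mode="constant")
    out = np.zeros(image.shape, dtype=float)
    for y in range(image.shape[0]):
        for x in range(image.shape[1]):
            out[y, x] = np.sum(padded[y:y + kh, x:x + kw] * kernel)
    return out

demo = np.arange(25, dtype=float).reshape(5, 5)
laplace = np.array([[0.25, 0.5, 0.25], [0.5, -3, 0.5], [0.25, 0.5, 0.25]])
print(convolve2d(demo, laplace).shape)  # (5, 5)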
| 667 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
is_vision_available,
)
__lowerCamelCase = {'''configuration_vit''': ['''VIT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ViTConfig''', '''ViTOnnxConfig''']}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase = ['''ViTFeatureExtractor''']
__lowerCamelCase = ['''ViTImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase = [
'''VIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ViTForImageClassification''',
'''ViTForMaskedImageModeling''',
'''ViTModel''',
'''ViTPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase = [
'''TFViTForImageClassification''',
'''TFViTModel''',
'''TFViTPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase = [
'''FlaxViTForImageClassification''',
'''FlaxViTModel''',
'''FlaxViTPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_vit import VIT_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTConfig, ViTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_vit import ViTFeatureExtractor
from .image_processing_vit import ViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit import (
VIT_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTForImageClassification,
ViTForMaskedImageModeling,
ViTModel,
ViTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vit import TFViTForImageClassification, TFViTModel, TFViTPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel, FlaxViTPreTrainedModel
else:
import sys
__lowerCamelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
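# Sketch of the lazy-import pattern that _LazyModule and the _import_structure
# above implement: attribute access triggers the real import via __getattr__.
# This is a simplified illustration, not transformers' actual implementation.
import importlib
import types

class LazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        self._class_to_module = {
            cls: mod for mod, classes in import_structure.items() for cls in classes
        }

    def __getattr__(self, item):
        if item not in self._class_to_module:
            raise AttributeError(item)
        # import the submodule on first access, then fetch the attribute
        submodule = importlib.import_module("." + self._class_to_module[item], self.__name__)
        return getattr(submodule, item)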
| 667 |
'''simple docstring'''
def manhattan_distance( point_a, point_b ) -> float:
    _validate_point(point_a )
    _validate_point(point_b )
    if len(point_a ) != len(point_b ):
        raise ValueError("""Both points must be in the same n-dimensional space""" )
    return float(sum(abs(a - b ) for a, b in zip(point_a, point_b ) ) )
def _validate_point( point ) -> None:
    if point:
        if isinstance(point, list ):
            for item in point:
                if not isinstance(item, (int, float) ):
                    msg = (
                        """Expected a list of numbers as input, found """
                        f'''{type(item ).__name__}'''
                    )
                    raise TypeError(msg )
        else:
            msg = f'''Expected a list of numbers as input, found {type(point ).__name__}'''
            raise TypeError(msg )
    else:
        raise ValueError("""Missing an input""" )
def manhattan_distance_one_liner( point_a, point_b ) -> float:
    _validate_point(point_a )
    _validate_point(point_b )
    if len(point_a ) != len(point_b ):
        raise ValueError("""Both points must be in the same n-dimensional space""" )
    return float(sum(abs(x - y ) for x, y in zip(point_a, point_b ) ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
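# Quick usage check for the helpers above; the values are illustrative.
assert manhattan_distance([1, 1], [2, 2]) == 2.0
assert manhattan_distance_one_liner([1.5, 1.5], [6, 6]) == 9.0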
| 667 | 1 |
'''simple docstring'''
import argparse
import torch
from safetensors.torch import load_file
from diffusers import StableDiffusionPipeline
def convert( base_model_path, checkpoint_path, lora_prefix_unet, lora_prefix_text_encoder, alpha ) -> Optional[int]:
# load base model
A_ = StableDiffusionPipeline.from_pretrained(UpperCAmelCase__, torch_dtype=torch.floataa )
# load LoRA weight from .safetensors
A_ = load_file(UpperCAmelCase__ )
A_ = []
# directly update weight in diffusers model
for key in state_dict:
# it is suggested to print out the key, it usually will be something like below
# "lora_te_text_model_encoder_layers_0_self_attn_k_proj.lora_down.weight"
# as we have set the alpha beforehand, so just skip
if ".alpha" in key or key in visited:
continue
if "text" in key:
A_ = key.split(""".""" )[0].split(lora_prefix_text_encoder + """_""" )[-1].split("""_""" )
A_ = pipeline.text_encoder
else:
A_ = key.split(""".""" )[0].split(lora_prefix_unet + """_""" )[-1].split("""_""" )
A_ = pipeline.unet
# find the target layer
A_ = layer_infos.pop(0 )
while len(UpperCAmelCase__ ) > -1:
try:
A_ = curr_layer.__getattr__(UpperCAmelCase__ )
if len(UpperCAmelCase__ ) > 0:
A_ = layer_infos.pop(0 )
elif len(UpperCAmelCase__ ) == 0:
break
except Exception:
if len(UpperCAmelCase__ ) > 0:
temp_name += "_" + layer_infos.pop(0 )
else:
A_ = layer_infos.pop(0 )
A_ = []
if "lora_down" in key:
pair_keys.append(key.replace("""lora_down""", """lora_up""" ) )
pair_keys.append(UpperCAmelCase__ )
else:
pair_keys.append(UpperCAmelCase__ )
pair_keys.append(key.replace("""lora_up""", """lora_down""" ) )
# update weight
if len(state_dict[pair_keys[0]].shape ) == 4:
A_ = state_dict[pair_keys[0]].squeeze(3 ).squeeze(2 ).to(torch.floataa )
A_ = state_dict[pair_keys[1]].squeeze(3 ).squeeze(2 ).to(torch.floataa )
curr_layer.weight.data += alpha * torch.mm(UpperCAmelCase__, UpperCAmelCase__ ).unsqueeze(2 ).unsqueeze(3 )
else:
A_ = state_dict[pair_keys[0]].to(torch.floataa )
A_ = state_dict[pair_keys[1]].to(torch.floataa )
curr_layer.weight.data += alpha * torch.mm(UpperCAmelCase__, UpperCAmelCase__ )
# update visited list
for item in pair_keys:
visited.append(UpperCAmelCase__ )
return pipeline
if __name__ == "__main__":
__lowerCamelCase = argparse.ArgumentParser()
parser.add_argument(
'''--base_model_path''', default=None, type=str, required=True, help='''Path to the base model in diffusers format.'''
)
parser.add_argument(
'''--checkpoint_path''', default=None, type=str, required=True, help='''Path to the checkpoint to convert.'''
)
parser.add_argument('''--dump_path''', default=None, type=str, required=True, help='''Path to the output model.''')
parser.add_argument(
'''--lora_prefix_unet''', default='''lora_unet''', type=str, help='''The prefix of UNet weight in safetensors'''
)
parser.add_argument(
'''--lora_prefix_text_encoder''',
default='''lora_te''',
type=str,
help='''The prefix of text encoder weight in safetensors''',
)
parser.add_argument('''--alpha''', default=0.75, type=float, help='''The merging ratio in W = W0 + alpha * deltaW''')
parser.add_argument(
'''--to_safetensors''', action='''store_true''', help='''Whether to store pipeline in safetensors format or not.'''
)
parser.add_argument('''--device''', type=str, help='''Device to use (e.g. cpu, cuda:0, cuda:1, etc.)''')
__lowerCamelCase = parser.parse_args()
__lowerCamelCase = args.base_model_path
__lowerCamelCase = args.checkpoint_path
__lowerCamelCase = args.dump_path
__lowerCamelCase = args.lora_prefix_unet
__lowerCamelCase = args.lora_prefix_text_encoder
__lowerCamelCase = args.alpha
__lowerCamelCase = convert(base_model_path, checkpoint_path, lora_prefix_unet, lora_prefix_text_encoder, alpha)
__lowerCamelCase = pipe.to(args.device)
pipe.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
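# The per-layer LoRA merge performed above, isolated as a sketch:
# W <- W + alpha * (up @ down). All shapes here are illustrative assumptions.
import torch

out_features, in_features, rank = 64, 32, 4
weight = torch.randn(out_features, in_features)
lora_up = torch.randn(out_features, rank)
lora_down = torch.randn(rank, in_features)
merge_alpha = 0.75

weight = weight + merge_alpha * torch.mm(lora_up, lora_down)
print(weight.shape)  # torch.Size([64, 32])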
| 667 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_beit import BeitImageProcessor
__lowerCamelCase = logging.get_logger(__name__)
class A__ ( _snake_case ):
def __init__( self , *args , **kwargs ) -> None:
'''simple docstring'''
warnings.warn(
"""The class BeitFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"""
""" use BeitImageProcessor instead.""" , FutureWarning , )
super().__init__(*args , **kwargs )
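# Generic form of the deprecation shim above: subclass the replacement class
# and warn on construction. A sketch with placeholder names, not library code.
import warnings

class NewProcessor:
    pass

class OldProcessor(NewProcessor):
    def __init__(self, *args, **kwargs):
        warnings.warn(
            "OldProcessor is deprecated; use NewProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)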
| 667 | 1 |
'''simple docstring'''
from dataclasses import dataclass
from typing import Optional
import numpy as np
import torch
import torch.nn as nn
from ..utils import BaseOutput, is_torch_version, randn_tensor
from .attention_processor import SpatialNorm
from .unet_ad_blocks import UNetMidBlockaD, get_down_block, get_up_block
@dataclass
class A__ ( _snake_case ):
lowercase = 42
class A__ ( nn.Module ):
def __init__( self , in_channels=3 , out_channels=3 , down_block_types=("DownEncoderBlock2D",) , block_out_channels=(64,) , layers_per_block=2 , norm_num_groups=32 , act_fn="silu" , double_z=True , ) -> None:
'''simple docstring'''
super().__init__()
A_ = layers_per_block
A_ = torch.nn.Convad(
UpperCamelCase__ , block_out_channels[0] , kernel_size=3 , stride=1 , padding=1 , )
A_ = None
A_ = nn.ModuleList([] )
# down
A_ = block_out_channels[0]
for i, down_block_type in enumerate(UpperCamelCase__ ):
A_ = output_channel
A_ = block_out_channels[i]
A_ = i == len(UpperCamelCase__ ) - 1
A_ = get_down_block(
UpperCamelCase__ , num_layers=self.layers_per_block , in_channels=UpperCamelCase__ , out_channels=UpperCamelCase__ , add_downsample=not is_final_block , resnet_eps=1e-6 , downsample_padding=0 , resnet_act_fn=UpperCamelCase__ , resnet_groups=UpperCamelCase__ , attention_head_dim=UpperCamelCase__ , temb_channels=UpperCamelCase__ , )
self.down_blocks.append(UpperCamelCase__ )
# mid
A_ = UNetMidBlockaD(
in_channels=block_out_channels[-1] , resnet_eps=1e-6 , resnet_act_fn=UpperCamelCase__ , output_scale_factor=1 , resnet_time_scale_shift="""default""" , attention_head_dim=block_out_channels[-1] , resnet_groups=UpperCamelCase__ , temb_channels=UpperCamelCase__ , )
# out
A_ = nn.GroupNorm(num_channels=block_out_channels[-1] , num_groups=UpperCamelCase__ , eps=1e-6 )
A_ = nn.SiLU()
A_ = 2 * out_channels if double_z else out_channels
A_ = nn.Convad(block_out_channels[-1] , UpperCamelCase__ , 3 , padding=1 )
A_ = False
def snake_case_ ( self , UpperCamelCase__ ) -> str:
'''simple docstring'''
A_ = x
A_ = self.conv_in(UpperCamelCase__ )
if self.training and self.gradient_checkpointing:
def create_custom_forward(UpperCamelCase__ ):
def custom_forward(*UpperCamelCase__ ):
return module(*UpperCamelCase__ )
return custom_forward
# down
if is_torch_version(""">=""" , """1.11.0""" ):
for down_block in self.down_blocks:
A_ = torch.utils.checkpoint.checkpoint(
create_custom_forward(UpperCamelCase__ ) , UpperCamelCase__ , use_reentrant=UpperCamelCase__ )
# middle
A_ = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) , UpperCamelCase__ , use_reentrant=UpperCamelCase__ )
else:
for down_block in self.down_blocks:
A_ = torch.utils.checkpoint.checkpoint(create_custom_forward(UpperCamelCase__ ) , UpperCamelCase__ )
# middle
A_ = torch.utils.checkpoint.checkpoint(create_custom_forward(self.mid_block ) , UpperCamelCase__ )
else:
# down
for down_block in self.down_blocks:
A_ = down_block(UpperCamelCase__ )
# middle
A_ = self.mid_block(UpperCamelCase__ )
# post-process
A_ = self.conv_norm_out(UpperCamelCase__ )
A_ = self.conv_act(UpperCamelCase__ )
A_ = self.conv_out(UpperCamelCase__ )
return sample
class A__ ( nn.Module ):
def __init__( self , in_channels=3 , out_channels=3 , up_block_types=("UpDecoderBlock2D",) , block_out_channels=(64,) , layers_per_block=2 , norm_num_groups=32 , act_fn="silu" , norm_type="group" , ) -> None:
'''simple docstring'''
super().__init__()
A_ = layers_per_block
A_ = nn.Convad(
UpperCamelCase__ , block_out_channels[-1] , kernel_size=3 , stride=1 , padding=1 , )
A_ = None
A_ = nn.ModuleList([] )
A_ = in_channels if norm_type == """spatial""" else None
# mid
A_ = UNetMidBlockaD(
in_channels=block_out_channels[-1] , resnet_eps=1e-6 , resnet_act_fn=UpperCamelCase__ , output_scale_factor=1 , resnet_time_scale_shift="""default""" if norm_type == """group""" else norm_type , attention_head_dim=block_out_channels[-1] , resnet_groups=UpperCamelCase__ , temb_channels=UpperCamelCase__ , )
# up
A_ = list(reversed(UpperCamelCase__ ) )
A_ = reversed_block_out_channels[0]
for i, up_block_type in enumerate(UpperCamelCase__ ):
A_ = output_channel
A_ = reversed_block_out_channels[i]
A_ = i == len(UpperCamelCase__ ) - 1
A_ = get_up_block(
UpperCamelCase__ , num_layers=self.layers_per_block + 1 , in_channels=UpperCamelCase__ , out_channels=UpperCamelCase__ , prev_output_channel=UpperCamelCase__ , add_upsample=not is_final_block , resnet_eps=1e-6 , resnet_act_fn=UpperCamelCase__ , resnet_groups=UpperCamelCase__ , attention_head_dim=UpperCamelCase__ , temb_channels=UpperCamelCase__ , resnet_time_scale_shift=UpperCamelCase__ , )
self.up_blocks.append(UpperCamelCase__ )
A_ = output_channel
# out
if norm_type == "spatial":
A_ = SpatialNorm(block_out_channels[0] , UpperCamelCase__ )
else:
A_ = nn.GroupNorm(num_channels=block_out_channels[0] , num_groups=UpperCamelCase__ , eps=1e-6 )
A_ = nn.SiLU()
A_ = nn.Convad(block_out_channels[0] , UpperCamelCase__ , 3 , padding=1 )
A_ = False
def snake_case_ ( self , z , latent_embeds=None ) -> Optional[Any]:
'''simple docstring'''
A_ = z
A_ = self.conv_in(UpperCamelCase__ )
A_ = next(iter(self.up_blocks.parameters() ) ).dtype
if self.training and self.gradient_checkpointing:
def create_custom_forward(UpperCamelCase__ ):
def custom_forward(*UpperCamelCase__ ):
return module(*UpperCamelCase__ )
return custom_forward
if is_torch_version(""">=""" , """1.11.0""" ):
# middle
A_ = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) , UpperCamelCase__ , UpperCamelCase__ , use_reentrant=UpperCamelCase__ )
A_ = sample.to(UpperCamelCase__ )
# up
for up_block in self.up_blocks:
A_ = torch.utils.checkpoint.checkpoint(
create_custom_forward(UpperCamelCase__ ) , UpperCamelCase__ , UpperCamelCase__ , use_reentrant=UpperCamelCase__ )
else:
# middle
A_ = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) , UpperCamelCase__ , UpperCamelCase__ )
A_ = sample.to(UpperCamelCase__ )
# up
for up_block in self.up_blocks:
A_ = torch.utils.checkpoint.checkpoint(create_custom_forward(UpperCamelCase__ ) , UpperCamelCase__ , UpperCamelCase__ )
else:
# middle
A_ = self.mid_block(UpperCamelCase__ , UpperCamelCase__ )
A_ = sample.to(UpperCamelCase__ )
# up
for up_block in self.up_blocks:
A_ = up_block(UpperCamelCase__ , UpperCamelCase__ )
# post-process
if latent_embeds is None:
A_ = self.conv_norm_out(UpperCamelCase__ )
else:
A_ = self.conv_norm_out(UpperCamelCase__ , UpperCamelCase__ )
A_ = self.conv_act(UpperCamelCase__ )
A_ = self.conv_out(UpperCamelCase__ )
return sample
class A__ ( nn.Module ):
def __init__( self , n_e , vq_embed_dim , beta , remap=None , unknown_index="random" , sane_index_shape=False , legacy=True ) -> None:
'''simple docstring'''
super().__init__()
A_ = n_e
A_ = vq_embed_dim
A_ = beta
A_ = legacy
A_ = nn.Embedding(self.n_e , self.vq_embed_dim )
self.embedding.weight.data.uniform_(-1.0 / self.n_e , 1.0 / self.n_e )
A_ = remap
if self.remap is not None:
self.register_buffer("""used""" , torch.tensor(np.load(self.remap ) ) )
A_ = self.used.shape[0]
A_ = unknown_index # "random" or "extra" or integer
if self.unknown_index == "extra":
A_ = self.re_embed
A_ = self.re_embed + 1
print(
f'''Remapping {self.n_e} indices to {self.re_embed} indices. '''
f'''Using {self.unknown_index} for unknown indices.''' )
else:
A_ = n_e
A_ = sane_index_shape
def snake_case_ ( self , UpperCamelCase__ ) -> Optional[Any]:
'''simple docstring'''
A_ = inds.shape
assert len(UpperCamelCase__ ) > 1
A_ = inds.reshape(ishape[0] , -1 )
A_ = self.used.to(UpperCamelCase__ )
A_ = (inds[:, :, None] == used[None, None, ...]).long()
A_ = match.argmax(-1 )
A_ = match.sum(2 ) < 1
if self.unknown_index == "random":
A_ = torch.randint(0 , self.re_embed , size=new[unknown].shape ).to(device=new.device )
else:
A_ = self.unknown_index
return new.reshape(UpperCamelCase__ )
def snake_case_ ( self , UpperCamelCase__ ) -> Dict:
'''simple docstring'''
A_ = inds.shape
assert len(UpperCamelCase__ ) > 1
A_ = inds.reshape(ishape[0] , -1 )
A_ = self.used.to(UpperCamelCase__ )
if self.re_embed > self.used.shape[0]: # extra token
A_ = 0 # simply set to zero
A_ = torch.gather(used[None, :][inds.shape[0] * [0], :] , 1 , UpperCamelCase__ )
return back.reshape(UpperCamelCase__ )
def snake_case_ ( self , UpperCamelCase__ ) -> str:
'''simple docstring'''
# reshape z -> (batch, height, width, channel) and flatten
A_ = z.permute(0 , 2 , 3 , 1 ).contiguous()
A_ = z.view(-1 , self.vq_embed_dim )
# distances from z to embeddings e_j (z - e)^2 = z^2 + e^2 - 2 e * z
A_ = torch.argmin(torch.cdist(UpperCamelCase__ , self.embedding.weight ) , dim=1 )
A_ = self.embedding(UpperCamelCase__ ).view(z.shape )
A_ = None
A_ = None
# compute loss for embedding
if not self.legacy:
A_ = self.beta * torch.mean((z_q.detach() - z) ** 2 ) + torch.mean((z_q - z.detach()) ** 2 )
else:
A_ = torch.mean((z_q.detach() - z) ** 2 ) + self.beta * torch.mean((z_q - z.detach()) ** 2 )
# preserve gradients
A_ = z + (z_q - z).detach()
# reshape back to match original input shape
A_ = z_q.permute(0 , 3 , 1 , 2 ).contiguous()
if self.remap is not None:
A_ = min_encoding_indices.reshape(z.shape[0] , -1 ) # add batch axis
A_ = self.remap_to_used(UpperCamelCase__ )
A_ = min_encoding_indices.reshape(-1 , 1 ) # flatten
if self.sane_index_shape:
A_ = min_encoding_indices.reshape(z_q.shape[0] , z_q.shape[2] , z_q.shape[3] )
return z_q, loss, (perplexity, min_encodings, min_encoding_indices)
def snake_case_ ( self , indices , shape ) -> Optional[int]:
'''simple docstring'''
# shape specifying (batch, height, width, channel)
if self.remap is not None:
A_ = indices.reshape(shape[0] , -1 ) # add batch axis
A_ = self.unmap_to_all(UpperCamelCase__ )
A_ = indices.reshape(-1 ) # flatten again
# get quantized latent vectors
A_ = self.embedding(UpperCamelCase__ )
if shape is not None:
A_ = z_q.view(UpperCamelCase__ )
# reshape back to match original input shape
A_ = z_q.permute(0 , 3 , 1 , 2 ).contiguous()
return z_q
class A__ ( _snake_case ):
def __init__( self , parameters , deterministic=False ) -> None:
'''simple docstring'''
A_ = parameters
A_ , A_ = torch.chunk(UpperCamelCase__ , 2 , dim=1 )
A_ = torch.clamp(self.logvar , -30.0 , 20.0 )
A_ = deterministic
A_ = torch.exp(0.5 * self.logvar )
A_ = torch.exp(self.logvar )
if self.deterministic:
A_ = A_ = torch.zeros_like(
self.mean , device=self.parameters.device , dtype=self.parameters.dtype )
def snake_case_ ( self , UpperCamelCase__ = None ) -> torch.FloatTensor:
'''simple docstring'''
# make sure sample is on the same device as the parameters and has same dtype
A_ = randn_tensor(
self.mean.shape , generator=UpperCamelCase__ , device=self.parameters.device , dtype=self.parameters.dtype )
A_ = self.mean + self.std * sample
return x
def snake_case_ ( self , UpperCamelCase__=None ) -> int:
'''simple docstring'''
if self.deterministic:
return torch.Tensor([0.0] )
else:
if other is None:
return 0.5 * torch.sum(torch.pow(self.mean , 2 ) + self.var - 1.0 - self.logvar , dim=[1, 2, 3] )
else:
return 0.5 * torch.sum(
torch.pow(self.mean - other.mean , 2 ) / other.var
+ self.var / other.var
- 1.0
- self.logvar
+ other.logvar , dim=[1, 2, 3] , )
def snake_case_ ( self , sample , dims=[1, 2, 3] ) -> Optional[Any]:
'''simple docstring'''
if self.deterministic:
return torch.Tensor([0.0] )
A_ = np.log(2.0 * np.pi )
return 0.5 * torch.sum(logtwopi + self.logvar + torch.pow(sample - self.mean , 2 ) / self.var , dim=UpperCamelCase__ )
def snake_case_ ( self ) -> List[Any]:
'''simple docstring'''
return self.mean
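# Isolated sketch of the two key tricks in the VectorQuantizer above: nearest
# codebook lookup via torch.cdist/argmin and the straight-through estimator
# z + (z_q - z).detach(). The sizes are illustrative assumptions.
import torch
import torch.nn as nn

n_codes, dim = 16, 8
codebook = nn.Embedding(n_codes, dim)
z = torch.randn(4, dim, requires_grad=True)  # flattened latents

indices = torch.argmin(torch.cdist(z, codebook.weight), dim=1)
z_q = codebook(indices)
commit_loss = torch.mean((z_q.detach() - z) ** 2)
z_q = z + (z_q - z).detach()  # gradients flow straight through to z
commit_loss.backward()
print(indices.shape, z.grad.shape)  # torch.Size([4]) torch.Size([4, 8])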
| 667 |
'''simple docstring'''
def is_palindrome( num ) -> bool:
    if num < 0:
        return False
    num_copy = num
    rev_num = 0
    while num > 0:
        rev_num = rev_num * 10 + (num % 10)
        num //= 10
    return num_copy == rev_num
if __name__ == "__main__":
import doctest
doctest.testmod()
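# Cross-check of the digit-reversal test above against the string-based
# approach; a verification sketch only.
for n in (0, 7, 10, 121, 123, 1221):
    assert is_palindrome(n) == (str(n) == str(n)[::-1])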
| 667 | 1 |
'''simple docstring'''
from __future__ import annotations
import math
def is_prime( number ) -> bool:
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
# All primes number are in format of 6k +/- 1
for i in range(5, int(math.sqrt(number ) + 1 ), 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
def list_truncated_nums( n ) -> list[int]:
    str_num = str(n )
    list_nums = [n]
    for i in range(1, len(str_num ) ):
        list_nums.append(int(str_num[i:] ) )
        list_nums.append(int(str_num[:-i] ) )
    return list_nums
def validate( n ) -> bool:
    if len(str(n ) ) > 3:
        if not is_prime(int(str(n )[-3:] ) ) or not is_prime(int(str(n )[:3] ) ):
            return False
    return True
def compute_truncated_primes( count = 11 ) -> list[int]:
    list_truncated_primes = []
    num = 13
    while len(list_truncated_primes ) != count:
        if validate(num ):
            list_nums = list_truncated_nums(num )
            if all(is_prime(i ) for i in list_nums ):
                list_truncated_primes.append(num )
        num += 2
    return list_truncated_primes
def solution() -> int:
    return sum(compute_truncated_primes(11 ) )
if __name__ == "__main__":
print(f"""{sum(compute_truncated_primes(11)) = }""")
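# Worked example of the truncation logic above for 3797, the classic two-sided
# truncatable prime; re-implemented here as a self-contained sketch.
def truncations(num: int) -> list[int]:
    s = str(num)
    return [int(s[i:]) for i in range(len(s))] + [int(s[:len(s) - i]) for i in range(1, len(s))]

print(sorted(truncations(3797)))  # [3, 7, 37, 97, 379, 797, 3797] - all prime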
| 667 |
'''simple docstring'''
__lowerCamelCase = range(2, 20 + 1)
__lowerCamelCase = [10**k for k in range(ks[-1] + 1)]
__lowerCamelCase = {}
def next_term( a_i, k, i, n ) -> Tuple:
A_ = sum(a_i[j] for j in range(UpperCAmelCase__, len(UpperCAmelCase__ ) ) )
A_ = sum(a_i[j] * base[j] for j in range(min(len(UpperCAmelCase__ ), UpperCAmelCase__ ) ) )
A_ , A_ = 0, 0
A_ = n - i
A_ = memo.get(UpperCAmelCase__ )
if sub_memo is not None:
A_ = sub_memo.get(UpperCAmelCase__ )
if jumps is not None and len(UpperCAmelCase__ ) > 0:
# find and make the largest jump without going over
A_ = -1
for _k in range(len(UpperCAmelCase__ ) - 1, -1, -1 ):
if jumps[_k][2] <= k and jumps[_k][1] <= max_dn:
A_ = _k
break
if max_jump >= 0:
A_ , A_ , A_ = jumps[max_jump]
# since the difference between jumps is cached, add c
A_ = diff + c
for j in range(min(UpperCAmelCase__, len(UpperCAmelCase__ ) ) ):
A_ , A_ = divmod(UpperCAmelCase__, 10 )
if new_c > 0:
add(UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__ )
else:
A_ = []
else:
A_ = {c: []}
A_ = sub_memo
if dn >= max_dn or c + diff >= base[k]:
return diff, dn
if k > ks[0]:
while True:
# keep doing smaller jumps
A_ , A_ = next_term(UpperCAmelCase__, k - 1, i + dn, UpperCAmelCase__ )
diff += _diff
dn += terms_jumped
if dn >= max_dn or c + diff >= base[k]:
break
else:
# would be too small a jump, just compute sequential terms instead
A_ , A_ = compute(UpperCAmelCase__, UpperCAmelCase__, i + dn, UpperCAmelCase__ )
diff += _diff
dn += terms_jumped
A_ = sub_memo[c]
# keep jumps sorted by # of terms skipped
A_ = 0
while j < len(UpperCAmelCase__ ):
if jumps[j][1] > dn:
break
j += 1
# cache the jump for this value digitsum(b) and c
sub_memo[c].insert(UpperCAmelCase__, (diff, dn, k) )
return (diff, dn)
def compute( a_i, k, i, n ) -> int:
if i >= n:
return 0, i
if k > len(UpperCAmelCase__ ):
a_i.extend([0 for _ in range(k - len(UpperCAmelCase__ ) )] )
# note: a_i -> b * 10^k + c
# ds_b -> digitsum(b)
# ds_c -> digitsum(c)
A_ = i
A_ , A_ , A_ = 0, 0, 0
for j in range(len(UpperCAmelCase__ ) ):
if j >= k:
ds_b += a_i[j]
else:
ds_c += a_i[j]
while i < n:
i += 1
A_ = ds_c + ds_b
diff += addend
A_ = 0
for j in range(UpperCAmelCase__ ):
A_ = a_i[j] + addend
A_ , A_ = divmod(UpperCAmelCase__, 10 )
ds_c += a_i[j]
if addend > 0:
break
if addend > 0:
add(UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__ )
return diff, i - start_i
def add( digits, k, addend ) -> None:
for j in range(k, len(digits ) ):
A_ = digits[j] + addend
if s >= 10:
A_ , A_ = divmod(UpperCAmelCase__, 10 )
A_ = addend // 10 + quotient
else:
A_ = s
A_ = addend // 10
if addend == 0:
break
while addend > 0:
A_ , A_ = divmod(UpperCAmelCase__, 10 )
digits.append(UpperCAmelCase__ )
def solution( n = 10**15 ) -> int:
A_ = [1]
A_ = 1
A_ = 0
while True:
A_ , A_ = next_term(UpperCAmelCase__, 20, i + dn, UpperCAmelCase__ )
dn += terms_jumped
if dn == n - i:
break
A_ = 0
for j in range(len(UpperCAmelCase__ ) ):
a_n += digits[j] * 10**j
return a_n
if __name__ == "__main__":
print(f"""{solution() = }""")
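# Naive version of the sequence the jump/memo machinery above accelerates:
# a(1) = 1 and a(n + 1) = a(n) + digit_sum(a(n)). A sketch for small n only;
# solution() computes a(10**15) via the cached jumps instead.
def digit_sum(n: int) -> int:
    return sum(int(d) for d in str(n))

def a_naive(n: int) -> int:
    value = 1
    for _ in range(n - 1):
        value += digit_sum(value)
    return value

print([a_naive(k) for k in range(1, 8)])  # [1, 2, 4, 8, 16, 23, 28]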
| 667 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__lowerCamelCase = {'''configuration_focalnet''': ['''FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''FocalNetConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase = [
'''FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''FocalNetForImageClassification''',
'''FocalNetForMaskedImageModeling''',
'''FocalNetBackbone''',
'''FocalNetModel''',
'''FocalNetPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_focalnet import FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FocalNetConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_focalnet import (
FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
FocalNetPreTrainedModel,
)
else:
import sys
__lowerCamelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 667 |
'''simple docstring'''
import tensorflow as tf
from ...tf_utils import shape_list
class A__ ( tf.keras.layers.Layer ):
def __init__( self , vocab_size , d_embed , d_proj , cutoffs , div_val=1 , keep_order=False , **kwargs ) -> None:
'''simple docstring'''
super().__init__(**kwargs )
A_ = vocab_size
A_ = d_embed
A_ = d_proj
A_ = cutoffs + [vocab_size]
A_ = [0] + self.cutoffs
A_ = div_val
A_ = self.cutoffs[0]
A_ = len(self.cutoffs ) - 1
A_ = self.shortlist_size + self.n_clusters
A_ = keep_order
A_ = []
A_ = []
def snake_case_ ( self , UpperCamelCase__ ) -> Union[str, Any]:
'''simple docstring'''
if self.n_clusters > 0:
A_ = self.add_weight(
shape=(self.n_clusters, self.d_embed) , initializer="""zeros""" , trainable=UpperCamelCase__ , name="""cluster_weight""" )
A_ = self.add_weight(
shape=(self.n_clusters,) , initializer="""zeros""" , trainable=UpperCamelCase__ , name="""cluster_bias""" )
if self.div_val == 1:
for i in range(len(self.cutoffs ) ):
if self.d_proj != self.d_embed:
A_ = self.add_weight(
shape=(self.d_embed, self.d_proj) , initializer="""zeros""" , trainable=UpperCamelCase__ , name=f'''out_projs_._{i}''' , )
self.out_projs.append(UpperCamelCase__ )
else:
self.out_projs.append(UpperCamelCase__ )
A_ = self.add_weight(
shape=(self.vocab_size, self.d_embed) , initializer="""zeros""" , trainable=UpperCamelCase__ , name=f'''out_layers_._{i}_._weight''' , )
A_ = self.add_weight(
shape=(self.vocab_size,) , initializer="""zeros""" , trainable=UpperCamelCase__ , name=f'''out_layers_._{i}_._bias''' , )
self.out_layers.append((weight, bias) )
else:
for i in range(len(self.cutoffs ) ):
A_ , A_ = self.cutoff_ends[i], self.cutoff_ends[i + 1]
A_ = self.d_embed // (self.div_val**i)
A_ = self.add_weight(
shape=(d_emb_i, self.d_proj) , initializer="""zeros""" , trainable=UpperCamelCase__ , name=f'''out_projs_._{i}''' )
self.out_projs.append(UpperCamelCase__ )
A_ = self.add_weight(
shape=(r_idx - l_idx, d_emb_i) , initializer="""zeros""" , trainable=UpperCamelCase__ , name=f'''out_layers_._{i}_._weight''' , )
A_ = self.add_weight(
shape=(r_idx - l_idx,) , initializer="""zeros""" , trainable=UpperCamelCase__ , name=f'''out_layers_._{i}_._bias''' , )
self.out_layers.append((weight, bias) )
super().build(UpperCamelCase__ )
@staticmethod
def snake_case_ ( x , W , b , proj=None ) -> List[Any]:
'''simple docstring'''
A_ = x
if proj is not None:
A_ = tf.einsum("""ibd,ed->ibe""" , UpperCamelCase__ , UpperCamelCase__ )
return tf.einsum("""ibd,nd->ibn""" , UpperCamelCase__ , UpperCamelCase__ ) + b
@staticmethod
def snake_case_ ( logprob , target ) -> List[str]:
'''simple docstring'''
A_ = shape_list(UpperCamelCase__ )
A_ = tf.range(lp_size[0] , dtype=target.dtype )
A_ = tf.stack([r, target] , 1 )
return tf.gather_nd(UpperCamelCase__ , UpperCamelCase__ )
def snake_case_ ( self , hidden , target , return_mean=True , training=False ) -> Optional[int]:
'''simple docstring'''
A_ = 0
if self.n_clusters == 0:
A_ = self._logit(UpperCamelCase__ , self.out_layers[0][0] , self.out_layers[0][1] , self.out_projs[0] )
if target is not None:
A_ = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=UpperCamelCase__ , logits=UpperCamelCase__ )
A_ = tf.nn.log_softmax(UpperCamelCase__ , axis=-1 )
else:
A_ = shape_list(UpperCamelCase__ )
A_ = []
A_ = tf.zeros(hidden_sizes[:2] )
for i in range(len(self.cutoffs ) ):
A_ , A_ = self.cutoff_ends[i], self.cutoff_ends[i + 1]
if target is not None:
A_ = (target >= l_idx) & (target < r_idx)
A_ = tf.where(UpperCamelCase__ )
A_ = tf.boolean_mask(UpperCamelCase__ , UpperCamelCase__ ) - l_idx
if self.div_val == 1:
A_ = self.out_layers[0][0][l_idx:r_idx]
A_ = self.out_layers[0][1][l_idx:r_idx]
else:
A_ = self.out_layers[i][0]
A_ = self.out_layers[i][1]
if i == 0:
A_ = tf.concat([cur_W, self.cluster_weight] , 0 )
A_ = tf.concat([cur_b, self.cluster_bias] , 0 )
A_ = self._logit(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , self.out_projs[0] )
A_ = tf.nn.log_softmax(UpperCamelCase__ )
out.append(head_logprob[..., : self.cutoffs[0]] )
if target is not None:
A_ = tf.boolean_mask(UpperCamelCase__ , UpperCamelCase__ )
A_ = self._gather_logprob(UpperCamelCase__ , UpperCamelCase__ )
else:
A_ = self._logit(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , self.out_projs[i] )
A_ = tf.nn.log_softmax(UpperCamelCase__ )
A_ = self.cutoffs[0] + i - 1 # No probability for the head cluster
A_ = head_logprob[..., cluster_prob_idx, None] + tail_logprob
out.append(UpperCamelCase__ )
if target is not None:
A_ = tf.boolean_mask(UpperCamelCase__ , UpperCamelCase__ )
A_ = tf.boolean_mask(UpperCamelCase__ , UpperCamelCase__ )
A_ = self._gather_logprob(UpperCamelCase__ , UpperCamelCase__ )
cur_logprob += cur_head_logprob[:, self.cutoff_ends[1] + i - 1]
if target is not None:
loss += tf.scatter_nd(UpperCamelCase__ , -cur_logprob , shape_list(UpperCamelCase__ ) )
A_ = tf.concat(UpperCamelCase__ , axis=-1 )
if target is not None:
if return_mean:
A_ = tf.reduce_mean(UpperCamelCase__ )
# Add the training-time loss value to the layer using `self.add_loss()`.
self.add_loss(UpperCamelCase__ )
# Log the loss as a metric (we could log arbitrary metrics,
# including different metrics for training and inference.
self.add_metric(UpperCamelCase__ , name=self.name , aggregation="""mean""" if return_mean else """""" )
return out
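# Intuition sketch for the adaptive softmax above: the head predicts the
# shortlist plus one "cluster" logit per tail; a tail token's log-probability
# is the cluster log-prob plus the tail log-prob. Pure NumPy, toy sizes.
import numpy as np

def log_softmax(x):
    x = x - x.max(-1, keepdims=True)
    return x - np.log(np.exp(x).sum(-1, keepdims=True))

shortlist, tail = 4, 6
head_logprob = log_softmax(np.random.randn(shortlist + 1))  # + 1 cluster token
tail_logprob = log_softmax(np.random.randn(tail))
full = np.concatenate([head_logprob[:shortlist], head_logprob[shortlist] + tail_logprob])
print(np.isclose(np.exp(full).sum(), 1.0))  # True: a proper distribution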
| 667 | 1 |
'''simple docstring'''
import json
import os
from dataclasses import dataclass
from functools import partial
from typing import Callable
import flax.linen as nn
import jax
import jax.numpy as jnp
import joblib
import optax
import wandb
from flax import jax_utils, struct, traverse_util
from flax.serialization import from_bytes, to_bytes
from flax.training import train_state
from flax.training.common_utils import shard
from tqdm.auto import tqdm
from transformers import BigBirdConfig, FlaxBigBirdForQuestionAnswering
from transformers.models.big_bird.modeling_flax_big_bird import FlaxBigBirdForQuestionAnsweringModule
class A__ ( _snake_case ):
lowercase = 42
lowercase = jnp.floataa
lowercase = True
def snake_case_ ( self ) -> Tuple:
'''simple docstring'''
super().setup()
A_ = nn.Dense(5 , dtype=self.dtype )
def __call__( self , *args , **kwargs ) -> Optional[int]:
'''simple docstring'''
A_ = super().__call__(*args , **kwargs )
A_ = self.cls(outputs[2] )
return outputs[:2] + (cls_out,)
class A__ ( _snake_case ):
lowercase = FlaxBigBirdForNaturalQuestionsModule
def calculate_loss_for_nq( start_logits, start_labels, end_logits, end_labels, pooled_logits, pooled_labels ) -> List[Any]:
def cross_entropy(logits, labels, reduction=None ):
A_ = logits.shape[-1]
A_ = (labels[..., None] == jnp.arange(UpperCAmelCase__ )[None]).astype("""f4""" )
A_ = jax.nn.log_softmax(UpperCAmelCase__, axis=-1 )
A_ = -jnp.sum(labels * logits, axis=-1 )
if reduction is not None:
A_ = reduction(UpperCAmelCase__ )
return loss
A_ = partial(cross_entropy, reduction=jnp.mean )
A_ = cross_entropy(start_logits, start_labels )
A_ = cross_entropy(end_logits, end_labels )
A_ = cross_entropy(pooled_logits, pooled_labels )
return (start_loss + end_loss + pooled_loss) / 3
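# Self-contained sketch of the averaged three-headed loss above (start span,
# end span, answer category), mirroring cross_entropy with reduction=jnp.mean;
# the tiny shapes are illustrative assumptions.
import jax
import jax.numpy as jnp

def one_hot_xent(logits, labels):
    onehot = (labels[..., None] == jnp.arange(logits.shape[-1])[None]).astype("f4")
    return jnp.mean(-jnp.sum(onehot * jax.nn.log_softmax(logits, axis=-1), axis=-1))

start_logits = jnp.zeros((2, 16))
end_logits = jnp.zeros((2, 16))
pooled_logits = jnp.zeros((2, 5))
loss = (one_hot_xent(start_logits, jnp.array([3, 5]))
        + one_hot_xent(end_logits, jnp.array([7, 9]))
        + one_hot_xent(pooled_logits, jnp.array([0, 4]))) / 3
print(loss)  # scalar average of the three cross-entropies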
@dataclass
class A__ :
lowercase = "google/bigbird-roberta-base"
lowercase = 3_000
lowercase = 10_500
lowercase = 128
lowercase = 3
lowercase = 1
lowercase = 5
# tx_args
lowercase = 3e-5
lowercase = 0.0
lowercase = 20_000
lowercase = 0.0095
lowercase = "bigbird-roberta-natural-questions"
lowercase = "training-expt"
lowercase = "data/nq-training.jsonl"
lowercase = "data/nq-validation.jsonl"
def snake_case_ ( self ) -> Tuple:
'''simple docstring'''
os.makedirs(self.base_dir , exist_ok=UpperCamelCase__ )
A_ = os.path.join(self.base_dir , self.save_dir )
A_ = self.batch_size_per_device * jax.device_count()
@dataclass
class A__ :
lowercase = 42
lowercase = 4_096 # no dynamic padding on TPUs
def __call__( self , UpperCamelCase__ ) -> str:
'''simple docstring'''
A_ = self.collate_fn(UpperCamelCase__ )
A_ = jax.tree_util.tree_map(UpperCamelCase__ , UpperCamelCase__ )
return batch
def snake_case_ ( self , UpperCamelCase__ ) -> Tuple:
'''simple docstring'''
A_ , A_ = self.fetch_inputs(features["""input_ids"""] )
A_ = {
"""input_ids""": jnp.array(UpperCamelCase__ , dtype=jnp.intaa ),
"""attention_mask""": jnp.array(UpperCamelCase__ , dtype=jnp.intaa ),
"""start_labels""": jnp.array(features["""start_token"""] , dtype=jnp.intaa ),
"""end_labels""": jnp.array(features["""end_token"""] , dtype=jnp.intaa ),
"""pooled_labels""": jnp.array(features["""category"""] , dtype=jnp.intaa ),
}
return batch
def snake_case_ ( self , UpperCamelCase__ ) -> int:
'''simple docstring'''
A_ = [self._fetch_inputs(UpperCamelCase__ ) for ids in input_ids]
return zip(*UpperCamelCase__ )
def snake_case_ ( self , UpperCamelCase__ ) -> Any:
'''simple docstring'''
A_ = [1 for _ in range(len(UpperCamelCase__ ) )]
while len(UpperCamelCase__ ) < self.max_length:
input_ids.append(self.pad_id )
attention_mask.append(0 )
return input_ids, attention_mask
def get_batched_dataset( dataset, batch_size, seed=None ) -> int:
if seed is not None:
A_ = dataset.shuffle(seed=UpperCAmelCase__ )
for i in range(len(UpperCAmelCase__ ) // batch_size ):
A_ = dataset[i * batch_size : (i + 1) * batch_size]
yield dict(UpperCAmelCase__ )
@partial(jax.pmap, axis_name="""batch""" )
def train_step( state, drp_rng, **model_inputs ) -> Any:
def loss_fn(UpperCAmelCase__ ):
A_ = model_inputs.pop("""start_labels""" )
A_ = model_inputs.pop("""end_labels""" )
A_ = model_inputs.pop("""pooled_labels""" )
A_ = state.apply_fn(**UpperCAmelCase__, params=UpperCAmelCase__, dropout_rng=UpperCAmelCase__, train=UpperCAmelCase__ )
A_ , A_ , A_ = outputs
return state.loss_fn(
UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, )
A_ , A_ = jax.random.split(UpperCAmelCase__ )
A_ = jax.value_and_grad(UpperCAmelCase__ )
A_ , A_ = grad_fn(state.params )
A_ = jax.lax.pmean({"""loss""": loss}, axis_name="""batch""" )
A_ = jax.lax.pmean(UpperCAmelCase__, """batch""" )
A_ = state.apply_gradients(grads=UpperCAmelCase__ )
return state, metrics, new_drp_rng
@partial(jax.pmap, axis_name="""batch""" )
def val_step( state, **model_inputs ) -> List[str]:
A_ = model_inputs.pop("""start_labels""" )
A_ = model_inputs.pop("""end_labels""" )
A_ = model_inputs.pop("""pooled_labels""" )
A_ = state.apply_fn(**UpperCAmelCase__, params=state.params, train=UpperCAmelCase__ )
A_ , A_ , A_ = outputs
A_ = state.loss_fn(UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__ )
A_ = jax.lax.pmean({"""loss""": loss}, axis_name="""batch""" )
return metrics
class A__ ( train_state.TrainState ):
lowercase = struct.field(pytree_node=_snake_case )
@dataclass
class A__ :
lowercase = 42
lowercase = 42
lowercase = 42
lowercase = 42
lowercase = 42
lowercase = 42
lowercase = None
def snake_case_ ( self , model , tx , num_train_steps , ckpt_dir=None ) -> int:
'''simple docstring'''
A_ = model.params
A_ = TrainState.create(
apply_fn=model.__call__ , params=UpperCamelCase__ , tx=UpperCamelCase__ , loss_fn=UpperCamelCase__ , )
if ckpt_dir is not None:
A_ , A_ , A_ , A_ , A_ = restore_checkpoint(UpperCamelCase__ , UpperCamelCase__ )
A_ = {
"""lr""": args.lr,
"""init_lr""": args.init_lr,
"""warmup_steps""": args.warmup_steps,
"""num_train_steps""": num_train_steps,
"""weight_decay""": args.weight_decay,
}
A_ , A_ = build_tx(**UpperCamelCase__ )
A_ = train_state.TrainState(
step=UpperCamelCase__ , apply_fn=model.__call__ , params=UpperCamelCase__ , tx=UpperCamelCase__ , opt_state=UpperCamelCase__ , )
A_ = args
A_ = data_collator
A_ = lr
A_ = params
A_ = jax_utils.replicate(UpperCamelCase__ )
return state
def snake_case_ ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) -> int:
'''simple docstring'''
A_ = self.args
A_ = len(UpperCamelCase__ ) // args.batch_size
A_ = jax.random.PRNGKey(0 )
A_ = jax.random.split(UpperCamelCase__ , jax.device_count() )
for epoch in range(args.max_epochs ):
A_ = jnp.array(0 , dtype=jnp.floataa )
A_ = get_batched_dataset(UpperCamelCase__ , args.batch_size , seed=UpperCamelCase__ )
A_ = 0
for batch in tqdm(UpperCamelCase__ , total=UpperCamelCase__ , desc=f'''Running EPOCH-{epoch}''' ):
A_ = self.data_collator(UpperCamelCase__ )
A_ , A_ , A_ = self.train_step_fn(UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__ )
running_loss += jax_utils.unreplicate(metrics["""loss"""] )
i += 1
if i % args.logging_steps == 0:
A_ = jax_utils.unreplicate(state.step )
A_ = running_loss.item() / i
A_ = self.scheduler_fn(state_step - 1 )
A_ = self.evaluate(UpperCamelCase__ , UpperCamelCase__ )
A_ = {
"""step""": state_step.item(),
"""eval_loss""": eval_loss.item(),
"""tr_loss""": tr_loss,
"""lr""": lr.item(),
}
tqdm.write(str(UpperCamelCase__ ) )
self.logger.log(UpperCamelCase__ , commit=UpperCamelCase__ )
if i % args.save_steps == 0:
self.save_checkpoint(args.save_dir + f'''-e{epoch}-s{i}''' , state=UpperCamelCase__ )
def snake_case_ ( self , UpperCamelCase__ , UpperCamelCase__ ) -> Union[str, Any]:
'''simple docstring'''
A_ = get_batched_dataset(UpperCamelCase__ , self.args.batch_size )
A_ = len(UpperCamelCase__ ) // self.args.batch_size
A_ = jnp.array(0 , dtype=jnp.floataa )
A_ = 0
for batch in tqdm(UpperCamelCase__ , total=UpperCamelCase__ , desc="""Evaluating ... """ ):
A_ = self.data_collator(UpperCamelCase__ )
A_ = self.val_step_fn(UpperCamelCase__ , **UpperCamelCase__ )
running_loss += jax_utils.unreplicate(metrics["""loss"""] )
i += 1
return running_loss / i
def snake_case_ ( self , UpperCamelCase__ , UpperCamelCase__ ) -> Tuple:
'''simple docstring'''
A_ = jax_utils.unreplicate(UpperCamelCase__ )
print(f'''SAVING CHECKPOINT IN {save_dir}''' , end=""" ... """ )
self.model_save_fn(UpperCamelCase__ , params=state.params )
with open(os.path.join(UpperCamelCase__ , """opt_state.msgpack""" ) , """wb""" ) as f:
f.write(to_bytes(state.opt_state ) )
joblib.dump(self.args , os.path.join(UpperCamelCase__ , """args.joblib""" ) )
joblib.dump(self.data_collator , os.path.join(UpperCamelCase__ , """data_collator.joblib""" ) )
with open(os.path.join(UpperCamelCase__ , """training_state.json""" ) , """w""" ) as f:
json.dump({"""step""": state.step.item()} , UpperCamelCase__ )
print("""DONE""" )
def UpperCAmelCase__ ( UpperCAmelCase__, UpperCAmelCase__ ) -> str:
print(F'''RESTORING CHECKPOINT FROM {save_dir}''', end=""" ... """ )
with open(os.path.join(UpperCAmelCase__, """flax_model.msgpack""" ), """rb""" ) as f:
A_ = from_bytes(state.params, f.read() )
with open(os.path.join(UpperCAmelCase__, """opt_state.msgpack""" ), """rb""" ) as f:
A_ = from_bytes(state.opt_state, f.read() )
A_ = joblib.load(os.path.join(UpperCAmelCase__, """args.joblib""" ) )
A_ = joblib.load(os.path.join(UpperCAmelCase__, """data_collator.joblib""" ) )
with open(os.path.join(UpperCAmelCase__, """training_state.json""" ), """r""" ) as f:
A_ = json.load(UpperCAmelCase__ )
A_ = training_state["""step"""]
print("""DONE""" )
return params, opt_state, step, args, data_collator
def UpperCAmelCase__ ( UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__ ) -> int:
A_ = num_train_steps - warmup_steps
A_ = optax.linear_schedule(init_value=UpperCAmelCase__, end_value=UpperCAmelCase__, transition_steps=UpperCAmelCase__ )
A_ = optax.linear_schedule(init_value=UpperCAmelCase__, end_value=1e-7, transition_steps=UpperCAmelCase__ )
A_ = optax.join_schedules(schedules=[warmup_fn, decay_fn], boundaries=[warmup_steps] )
return lr
def UpperCAmelCase__ ( UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__ ) -> List[Any]:
def weight_decay_mask(UpperCAmelCase__ ):
A_ = traverse_util.flatten_dict(UpperCAmelCase__ )
A_ = {k: (v[-1] != """bias""" and v[-2:] != ("""LayerNorm""", """scale""")) for k, v in params.items()}
return traverse_util.unflatten_dict(UpperCAmelCase__ )
A_ = scheduler_fn(UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__ )
A_ = optax.adamw(learning_rate=UpperCAmelCase__, weight_decay=UpperCAmelCase__, mask=UpperCAmelCase__ )
return tx, lr
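# --- Hedged demo (not part of the original script) ---
# Standalone sanity check of the schedule shape built by the two helpers
# above, with assumed values: lr=3e-5, init_lr=0.0, warmup_steps=100,
# num_train_steps=1000. optax.linear_schedule / optax.join_schedules are the
# same calls used above; warmup ramps 0 -> lr, then decay ramps lr -> 1e-7.
import optax
_warmup = optax.linear_schedule(init_value=0.0, end_value=3e-5, transition_steps=100)
_decay = optax.linear_schedule(init_value=3e-5, end_value=1e-7, transition_steps=900)
_lr_fn = optax.join_schedules(schedules=[_warmup, _decay], boundaries=[100])
print(_lr_fn(0), _lr_fn(100), _lr_fn(1000))  # 0.0 -> 3e-5 -> 1e-7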
| 667 |
'''simple docstring'''
from queue import PriorityQueue
from typing import Any
import numpy as np
def UpperCAmelCase__ ( UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, ) -> float | int:
for nxt, d in graph[v]:
if nxt in visited_forward:
continue
A_ = cst_fwd.get(UpperCAmelCase__, np.inf )
A_ = cst_fwd[v] + d
if new_cost_f < old_cost_f:
queue.put((new_cost_f, nxt) )
A_ = new_cost_f
A_ = v
if nxt in visited_backward:
if cst_fwd[v] + d + cst_bwd[nxt] < shortest_distance:
A_ = cst_fwd[v] + d + cst_bwd[nxt]
return shortest_distance
def UpperCAmelCase__ ( UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__ ) -> int:
A_ = -1
A_ = set()
A_ = set()
A_ = {source: 0}
A_ = {destination: 0}
A_ = {source: None}
A_ = {destination: None}
A_ = PriorityQueue()
A_ = PriorityQueue()
A_ = np.inf
queue_forward.put((0, source) )
queue_backward.put((0, destination) )
if source == destination:
return 0
while not queue_forward.empty() and not queue_backward.empty():
A_ , A_ = queue_forward.get()
visited_forward.add(UpperCAmelCase__ )
A_ , A_ = queue_backward.get()
visited_backward.add(UpperCAmelCase__ )
A_ = pass_and_relaxation(
UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, )
A_ = pass_and_relaxation(
UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, )
if cst_fwd[v_fwd] + cst_bwd[v_bwd] >= shortest_distance:
break
if shortest_distance != np.inf:
A_ = shortest_distance
return shortest_path_distance
__lowerCamelCase = {
'''B''': [['''C''', 1]],
'''C''': [['''D''', 1]],
'''D''': [['''F''', 1]],
'''E''': [['''B''', 1], ['''G''', 2]],
'''F''': [],
'''G''': [['''F''', 1]],
}
__lowerCamelCase = {
'''B''': [['''E''', 1]],
'''C''': [['''B''', 1]],
'''D''': [['''C''', 1]],
'''F''': [['''D''', 1], ['''G''', 1]],
'''E''': [[None, np.inf]],
'''G''': [['''E''', 2]],
}
if __name__ == "__main__":
import doctest
doctest.testmod()
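# --- Hedged usage sketch (not in the original file) ---
# The two dicts above are the forward and backward adjacency lists; the
# scrubbing gave both the same placeholder name, so the distinct names
# (graph_fwd / graph_bwd) and the search function name (bidirectional_dij)
# are assumptions. With these weights the shortest E -> F distance is 3:
# E -> G -> F costs 2 + 1, beating E -> B -> C -> D -> F at 4.
# assert bidirectional_dij("E", "F", graph_fwd, graph_bwd) == 3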
| 667 | 1 |
'''simple docstring'''
from __future__ import annotations
from collections.abc import Callable
def UpperCAmelCase__ ( UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__ = 1_00, ) -> float:
A_ = x_start
A_ = fnc(UpperCAmelCase__ )
A_ = 0.0
for _ in range(UpperCAmelCase__ ):
# Approximates small segments of curve as linear and solve
# for trapezoidal area
A_ = (x_end - x_start) / steps + xa
A_ = fnc(UpperCAmelCase__ )
area += abs(fxa + fxa ) * (xa - xa) / 2
# Increment step
A_ = xa
A_ = fxa
return area
if __name__ == "__main__":
def UpperCAmelCase__ ( UpperCAmelCase__ ) -> Any:
return x**3 + x**2
print('''f(x) = x^3 + x^2''')
print('''The area between the curve, x = -5, x = 5 and the x axis is:''')
__lowerCamelCase = 10
while i <= 10_0000:
print(f"""with {i} steps: {trapezoidal_area(f, -5, 5, i)}""")
i *= 10
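# --- Hedged extra check (not in the original file) ---
# Sanity check against a closed form: the integral of x^2 over [0, 1] is 1/3,
# so the estimate should approach 0.3333. `trapezoidal_area` is the name used
# in the f-string above; the scrubbed def obscures it, so the call is
# illustrative only.
# print(trapezoidal_area(lambda x: x * x, 0.0, 1.0, 1000))  # ~0.333333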
| 667 |
'''simple docstring'''
import os
__lowerCamelCase = {'''I''': 1, '''V''': 5, '''X''': 10, '''L''': 50, '''C''': 100, '''D''': 500, '''M''': 1000}
def UpperCAmelCase__ ( UpperCAmelCase__ ) -> int:
A_ = 0
A_ = 0
while index < len(UpperCAmelCase__ ) - 1:
A_ = SYMBOLS[numerals[index]]
A_ = SYMBOLS[numerals[index + 1]]
if current_value < next_value:
total_value -= current_value
else:
total_value += current_value
index += 1
total_value += SYMBOLS[numerals[index]]
return total_value
def UpperCAmelCase__ ( UpperCAmelCase__ ) -> str:
A_ = """"""
A_ = num // 10_00
numerals += m_count * "M"
num %= 10_00
A_ = num // 1_00
if c_count == 9:
numerals += "CM"
c_count -= 9
elif c_count == 4:
numerals += "CD"
c_count -= 4
if c_count >= 5:
numerals += "D"
c_count -= 5
numerals += c_count * "C"
num %= 1_00
A_ = num // 10
if x_count == 9:
numerals += "XC"
x_count -= 9
elif x_count == 4:
numerals += "XL"
x_count -= 4
if x_count >= 5:
numerals += "L"
x_count -= 5
numerals += x_count * "X"
num %= 10
if num == 9:
numerals += "IX"
num -= 9
elif num == 4:
numerals += "IV"
num -= 4
if num >= 5:
numerals += "V"
num -= 5
numerals += num * "I"
return numerals
def UpperCAmelCase__ ( UpperCAmelCase__ = "/p089_roman.txt" ) -> int:
A_ = 0
with open(os.path.dirname(UpperCAmelCase__ ) + roman_numerals_filename ) as filea:
A_ = filea.readlines()
for line in lines:
A_ = line.strip()
A_ = parse_roman_numerals(UpperCAmelCase__ )
A_ = generate_roman_numerals(UpperCAmelCase__ )
savings += len(UpperCAmelCase__ ) - len(UpperCAmelCase__ )
return savings
if __name__ == "__main__":
print(f"""{solution() = }""")
| 667 | 1 |
'''simple docstring'''
import importlib
import inspect
import json
import os
import re
import shutil
import sys
from pathlib import Path
from typing import Dict, Optional, Union
from urllib import request
from huggingface_hub import HfFolder, cached_download, hf_hub_download, model_info
from packaging import version
from .. import __version__
from . import DIFFUSERS_DYNAMIC_MODULE_NAME, HF_MODULES_CACHE, logging
__lowerCamelCase = (
'''https://raw.githubusercontent.com/huggingface/diffusers/{revision}/examples/community/{pipeline}.py'''
)
__lowerCamelCase = logging.get_logger(__name__) # pylint: disable=invalid-name
def UpperCAmelCase__ ( ) -> Dict:
A_ = """https://pypi.org/pypi/diffusers/json"""
A_ = json.loads(request.urlopen(UpperCAmelCase__ ).read() )["""releases"""].keys()
return sorted(UpperCAmelCase__, key=lambda UpperCAmelCase__ : version.Version(UpperCAmelCase__ ) )
def UpperCAmelCase__ ( ) -> Tuple:
    # This function has already been executed if HF_MODULES_CACHE is already in the Python path.
if HF_MODULES_CACHE in sys.path:
return
sys.path.append(UpperCAmelCase__ )
os.makedirs(UpperCAmelCase__, exist_ok=UpperCAmelCase__ )
A_ = Path(UpperCAmelCase__ ) / """__init__.py"""
if not init_path.exists():
init_path.touch()
def UpperCAmelCase__ ( UpperCAmelCase__ ) -> Union[str, Any]:
init_hf_modules()
A_ = Path(UpperCAmelCase__ ) / name
# If the parent module does not exist yet, recursively create it.
if not dynamic_module_path.parent.exists():
create_dynamic_module(dynamic_module_path.parent )
os.makedirs(UpperCAmelCase__, exist_ok=UpperCAmelCase__ )
A_ = dynamic_module_path / """__init__.py"""
if not init_path.exists():
init_path.touch()
def UpperCAmelCase__ ( UpperCAmelCase__ ) -> Dict:
with open(UpperCAmelCase__, """r""", encoding="""utf-8""" ) as f:
A_ = f.read()
# Imports of the form `import .xxx`
A_ = re.findall("""^\s*import\s+\.(\S+)\s*$""", UpperCAmelCase__, flags=re.MULTILINE )
# Imports of the form `from .xxx import yyy`
relative_imports += re.findall("""^\s*from\s+\.(\S+)\s+import""", UpperCAmelCase__, flags=re.MULTILINE )
# Unique-ify
return list(set(UpperCAmelCase__ ) )
def UpperCAmelCase__ ( UpperCAmelCase__ ) -> Any:
A_ = False
A_ = [module_file]
A_ = []
# Let's recurse through all relative imports
while not no_change:
A_ = []
for f in files_to_check:
new_imports.extend(get_relative_imports(UpperCAmelCase__ ) )
A_ = Path(UpperCAmelCase__ ).parent
A_ = [str(module_path / m ) for m in new_imports]
A_ = [f for f in new_import_files if f not in all_relative_imports]
A_ = [F'''{f}.py''' for f in new_import_files]
A_ = len(UpperCAmelCase__ ) == 0
all_relative_imports.extend(UpperCAmelCase__ )
return all_relative_imports
def UpperCAmelCase__ ( UpperCAmelCase__ ) -> Any:
with open(UpperCAmelCase__, """r""", encoding="""utf-8""" ) as f:
A_ = f.read()
# Imports of the form `import xxx`
A_ = re.findall("""^\s*import\s+(\S+)\s*$""", UpperCAmelCase__, flags=re.MULTILINE )
# Imports of the form `from xxx import yyy`
imports += re.findall("""^\s*from\s+(\S+)\s+import""", UpperCAmelCase__, flags=re.MULTILINE )
# Only keep the top-level module
A_ = [imp.split(""".""" )[0] for imp in imports if not imp.startswith(""".""" )]
# Unique-ify and test we got them all
A_ = list(set(UpperCAmelCase__ ) )
A_ = []
for imp in imports:
try:
importlib.import_module(UpperCAmelCase__ )
except ImportError:
missing_packages.append(UpperCAmelCase__ )
if len(UpperCAmelCase__ ) > 0:
raise ImportError(
"""This modeling file requires the following packages that were not found in your environment: """
F'''{", ".join(UpperCAmelCase__ )}. Run `pip install {" ".join(UpperCAmelCase__ )}`''' )
return get_relative_imports(UpperCAmelCase__ )
def UpperCAmelCase__ ( UpperCAmelCase__, UpperCAmelCase__ ) -> List[str]:
A_ = module_path.replace(os.path.sep, """.""" )
A_ = importlib.import_module(UpperCAmelCase__ )
if class_name is None:
return find_pipeline_class(UpperCAmelCase__ )
return getattr(UpperCAmelCase__, UpperCAmelCase__ )
def UpperCAmelCase__ ( UpperCAmelCase__ ) -> List[Any]:
from ..pipelines import DiffusionPipeline
A_ = dict(inspect.getmembers(UpperCAmelCase__, inspect.isclass ) )
A_ = None
for cls_name, cls in cls_members.items():
if (
cls_name != DiffusionPipeline.__name__
and issubclass(cls, UpperCAmelCase__ )
and cls.__module__.split(""".""" )[0] != "diffusers"
):
if pipeline_class is not None:
raise ValueError(
F'''Multiple classes that inherit from {DiffusionPipeline.__name__} have been found:'''
F''' {pipeline_class.__name__}, and {cls_name}. Please make sure to define only one in'''
F''' {loaded_module}.''' )
A_ = cls
return pipeline_class
def UpperCAmelCase__ ( UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__ = None, UpperCAmelCase__ = False, UpperCAmelCase__ = False, UpperCAmelCase__ = None, UpperCAmelCase__ = None, UpperCAmelCase__ = None, UpperCAmelCase__ = False, ) -> List[str]:
A_ = str(UpperCAmelCase__ )
A_ = os.path.join(UpperCAmelCase__, UpperCAmelCase__ )
if os.path.isfile(UpperCAmelCase__ ):
A_ = module_file_or_url
A_ = """local"""
elif pretrained_model_name_or_path.count("""/""" ) == 0:
A_ = get_diffusers_versions()
# cut ".dev0"
A_ = """v""" + """.""".join(__version__.split(""".""" )[:3] )
# retrieve github version that matches
if revision is None:
A_ = latest_version if latest_version[1:] in available_versions else """main"""
logger.info(F'''Defaulting to latest_version: {revision}.''' )
elif revision in available_versions:
A_ = F'''v{revision}'''
elif revision == "main":
A_ = revision
else:
raise ValueError(
F'''`custom_revision`: {revision} does not exist. Please make sure to choose one of'''
F''' {", ".join(available_versions + ["main"] )}.''' )
# community pipeline on GitHub
A_ = COMMUNITY_PIPELINES_URL.format(revision=UpperCAmelCase__, pipeline=UpperCAmelCase__ )
try:
A_ = cached_download(
UpperCAmelCase__, cache_dir=UpperCAmelCase__, force_download=UpperCAmelCase__, proxies=UpperCAmelCase__, resume_download=UpperCAmelCase__, local_files_only=UpperCAmelCase__, use_auth_token=UpperCAmelCase__, )
A_ = """git"""
A_ = pretrained_model_name_or_path + """.py"""
except EnvironmentError:
logger.error(F'''Could not locate the {module_file} inside {pretrained_model_name_or_path}.''' )
raise
else:
try:
# Load from URL or cache if already cached
A_ = hf_hub_download(
UpperCAmelCase__, UpperCAmelCase__, cache_dir=UpperCAmelCase__, force_download=UpperCAmelCase__, proxies=UpperCAmelCase__, resume_download=UpperCAmelCase__, local_files_only=UpperCAmelCase__, use_auth_token=UpperCAmelCase__, )
A_ = os.path.join("""local""", """--""".join(pretrained_model_name_or_path.split("""/""" ) ) )
except EnvironmentError:
logger.error(F'''Could not locate the {module_file} inside {pretrained_model_name_or_path}.''' )
raise
# Check we have all the requirements in our environment
A_ = check_imports(UpperCAmelCase__ )
# Now we move the module inside our cached dynamic modules.
A_ = DIFFUSERS_DYNAMIC_MODULE_NAME + os.path.sep + submodule
create_dynamic_module(UpperCAmelCase__ )
A_ = Path(UpperCAmelCase__ ) / full_submodule
if submodule == "local" or submodule == "git":
# We always copy local files (we could hash the file to see if there was a change, and give them the name of
# that hash, to only copy when there is a modification but it seems overkill for now).
# The only reason we do the copy is to avoid putting too many folders in sys.path.
shutil.copy(UpperCAmelCase__, submodule_path / module_file )
for module_needed in modules_needed:
A_ = F'''{module_needed}.py'''
shutil.copy(os.path.join(UpperCAmelCase__, UpperCAmelCase__ ), submodule_path / module_needed )
else:
# Get the commit hash
# TODO: we will get this info in the etag soon, so retrieve it from there and not here.
if isinstance(UpperCAmelCase__, UpperCAmelCase__ ):
A_ = use_auth_token
elif use_auth_token is True:
A_ = HfFolder.get_token()
else:
A_ = None
A_ = model_info(UpperCAmelCase__, revision=UpperCAmelCase__, token=UpperCAmelCase__ ).sha
# The module file will end up being placed in a subfolder with the git hash of the repo. This way we get the
# benefit of versioning.
A_ = submodule_path / commit_hash
A_ = full_submodule + os.path.sep + commit_hash
create_dynamic_module(UpperCAmelCase__ )
if not (submodule_path / module_file).exists():
shutil.copy(UpperCAmelCase__, submodule_path / module_file )
        # Make sure we also have every file with relative imports
for module_needed in modules_needed:
if not (submodule_path / module_needed).exists():
get_cached_module_file(
UpperCAmelCase__, F'''{module_needed}.py''', cache_dir=UpperCAmelCase__, force_download=UpperCAmelCase__, resume_download=UpperCAmelCase__, proxies=UpperCAmelCase__, use_auth_token=UpperCAmelCase__, revision=UpperCAmelCase__, local_files_only=UpperCAmelCase__, )
return os.path.join(UpperCAmelCase__, UpperCAmelCase__ )
def UpperCAmelCase__ ( UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__ = None, UpperCAmelCase__ = None, UpperCAmelCase__ = False, UpperCAmelCase__ = False, UpperCAmelCase__ = None, UpperCAmelCase__ = None, UpperCAmelCase__ = None, UpperCAmelCase__ = False, **UpperCAmelCase__, ) -> List[str]:
A_ = get_cached_module_file(
UpperCAmelCase__, UpperCAmelCase__, cache_dir=UpperCAmelCase__, force_download=UpperCAmelCase__, resume_download=UpperCAmelCase__, proxies=UpperCAmelCase__, use_auth_token=UpperCAmelCase__, revision=UpperCAmelCase__, local_files_only=UpperCAmelCase__, )
return get_class_in_module(UpperCAmelCase__, final_module.replace(""".py""", """""" ) )
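# --- Hedged usage sketch (not in the original file) ---
# In diffusers this entry point is get_class_from_dynamic_module (the def
# above is scrubbed): it resolves a community pipeline file, caches it under
# DIFFUSERS_DYNAMIC_MODULE_NAME, and returns the single DiffusionPipeline
# subclass located by the pipeline-class finder above. The repo id below is
# an assumed example.
# pipeline_cls = get_class_from_dynamic_module(
#     "hf-internal-testing/diffusers-dummy-pipeline", "pipeline.py"
# )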
| 667 |
'''simple docstring'''
import warnings
from diffusers import StableDiffusionImgaImgPipeline # noqa F401
warnings.warn(
'''The `image_to_image.py` script is outdated. Please use directly `from diffusers import'''
''' StableDiffusionImg2ImgPipeline` instead.'''
)
| 667 | 1 |
'''simple docstring'''
from __future__ import annotations
def UpperCAmelCase__ ( UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__ ) -> tuple[float, list[float]]:
A_ = list(range(len(UpperCAmelCase__ ) ) )
A_ = [v / w for v, w in zip(UpperCAmelCase__, UpperCAmelCase__ )]
index.sort(key=lambda UpperCAmelCase__ : ratio[i], reverse=UpperCAmelCase__ )
A_ = 0
A_ = [0] * len(UpperCAmelCase__ )
for i in index:
if weight[i] <= capacity:
A_ = 1
max_value += value[i]
capacity -= weight[i]
else:
A_ = capacity / weight[i]
max_value += value[i] * capacity / weight[i]
break
return max_value, fractions
if __name__ == "__main__":
import doctest
doctest.testmod()
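# --- Hedged demo (not in the original file) ---
# Classic instance: values (60, 100, 120), weights (10, 20, 30), capacity 50.
# Greedy-by-ratio takes items 1 and 2 whole and 2/3 of item 3 for 240 total.
# The def above is scrubbed; fractional_knapsack is an assumed name, and in
# the unscrubbed source the fractions list records 1, 1, 2/3.
# max_value, fractions = fractional_knapsack([60, 100, 120], [10, 20, 30], 50)
# assert max_value == 240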
| 667 |
'''simple docstring'''
import importlib.util
import os
import platform
from argparse import ArgumentParser
import huggingface_hub
from .. import __version__ as version
from ..utils import (
is_accelerate_available,
is_flax_available,
is_safetensors_available,
is_tf_available,
is_torch_available,
)
from . import BaseTransformersCLICommand
def UpperCAmelCase__ ( UpperCAmelCase__ ) -> Tuple:
return EnvironmentCommand()
def UpperCAmelCase__ ( UpperCAmelCase__ ) -> int:
return EnvironmentCommand(args.accelerate_config_file )
class A__ ( _snake_case ):
@staticmethod
def snake_case_ ( UpperCamelCase__ ) -> Union[str, Any]:
'''simple docstring'''
A_ = parser.add_parser("""env""" )
download_parser.set_defaults(func=UpperCamelCase__ )
download_parser.add_argument(
"""--accelerate-config_file""" , default=UpperCamelCase__ , help="""The accelerate config file to use for the default values in the launching script.""" , )
download_parser.set_defaults(func=UpperCamelCase__ )
def __init__( self , UpperCamelCase__ , *UpperCamelCase__ ) -> None:
'''simple docstring'''
A_ = accelerate_config_file
def snake_case_ ( self ) -> List[str]:
'''simple docstring'''
A_ = """not installed"""
if is_safetensors_available():
import safetensors
A_ = safetensors.__version__
elif importlib.util.find_spec("""safetensors""" ) is not None:
import safetensors
            A_ = f'''{safetensors.__version__} but is ignored because the installed PyTorch version is too old.'''
A_ = """not installed"""
A_ = A_ = """not found"""
if is_accelerate_available():
import accelerate
from accelerate.commands.config import default_config_file, load_config_from_file
A_ = accelerate.__version__
# Get the default from the config file.
if self._accelerate_config_file is not None or os.path.isfile(UpperCamelCase__ ):
A_ = load_config_from_file(self._accelerate_config_file ).to_dict()
A_ = (
"""\n""".join([f'''\t- {prop}: {val}''' for prop, val in accelerate_config.items()] )
if isinstance(UpperCamelCase__ , UpperCamelCase__ )
else f'''\t{accelerate_config}'''
)
A_ = """not installed"""
A_ = """NA"""
if is_torch_available():
import torch
A_ = torch.__version__
A_ = torch.cuda.is_available()
A_ = """not installed"""
A_ = """NA"""
if is_tf_available():
import tensorflow as tf
A_ = tf.__version__
try:
# deprecated in v2.1
A_ = tf.test.is_gpu_available()
except AttributeError:
# returns list of devices, convert to bool
A_ = bool(tf.config.list_physical_devices("""GPU""" ) )
A_ = """not installed"""
A_ = """not installed"""
A_ = """not installed"""
A_ = """NA"""
if is_flax_available():
import flax
import jax
import jaxlib
A_ = flax.__version__
A_ = jax.__version__
A_ = jaxlib.__version__
A_ = jax.lib.xla_bridge.get_backend().platform
A_ = {
"""`transformers` version""": version,
"""Platform""": platform.platform(),
"""Python version""": platform.python_version(),
"""Huggingface_hub version""": huggingface_hub.__version__,
"""Safetensors version""": f'''{safetensors_version}''',
"""Accelerate version""": f'''{accelerate_version}''',
"""Accelerate config""": f'''{accelerate_config_str}''',
"""PyTorch version (GPU?)""": f'''{pt_version} ({pt_cuda_available})''',
"""Tensorflow version (GPU?)""": f'''{tf_version} ({tf_cuda_available})''',
"""Flax version (CPU?/GPU?/TPU?)""": f'''{flax_version} ({jax_backend})''',
"""Jax version""": f'''{jax_version}''',
"""JaxLib version""": f'''{jaxlib_version}''',
"""Using GPU in script?""": """<fill in>""",
"""Using distributed or parallel set-up in script?""": """<fill in>""",
}
print("""\nCopy-and-paste the text below in your GitHub issue and FILL OUT the two last points.\n""" )
print(self.format_dict(UpperCamelCase__ ) )
return info
@staticmethod
def snake_case_ ( UpperCamelCase__ ) -> List[str]:
'''simple docstring'''
return "\n".join([f'''- {prop}: {val}''' for prop, val in d.items()] ) + "\n"
| 667 | 1 |
'''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import OwlViTImageProcessor, OwlViTProcessor
@require_vision
class A__ ( unittest.TestCase ):
def snake_case_ ( self ) -> Dict:
'''simple docstring'''
A_ = tempfile.mkdtemp()
# fmt: off
A_ = ["""""", """l""", """o""", """w""", """e""", """r""", """s""", """t""", """i""", """d""", """n""", """lo""", """l</w>""", """w</w>""", """r</w>""", """t</w>""", """low</w>""", """er</w>""", """lowest</w>""", """newer</w>""", """wider""", """<unk>""", """<|startoftext|>""", """<|endoftext|>"""]
# fmt: on
A_ = dict(zip(UpperCamelCase__ , range(len(UpperCamelCase__ ) ) ) )
A_ = ["""#version: 0.2""", """l o""", """lo w</w>""", """e r</w>""", """"""]
A_ = {"""unk_token""": """<unk>"""}
A_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
A_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write(json.dumps(UpperCamelCase__ ) + """\n""" )
with open(self.merges_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write("""\n""".join(UpperCamelCase__ ) )
A_ = {
"""do_resize""": True,
"""size""": 20,
"""do_center_crop""": True,
"""crop_size""": 18,
"""do_normalize""": True,
"""image_mean""": [0.48145466, 0.4578275, 0.40821073],
"""image_std""": [0.26862954, 0.26130258, 0.27577711],
}
A_ = os.path.join(self.tmpdirname , UpperCamelCase__ )
with open(self.image_processor_file , """w""" , encoding="""utf-8""" ) as fp:
json.dump(UpperCamelCase__ , UpperCamelCase__ )
def snake_case_ ( self , **UpperCamelCase__ ) -> str:
'''simple docstring'''
return CLIPTokenizer.from_pretrained(self.tmpdirname , pad_token="""!""" , **UpperCamelCase__ )
def snake_case_ ( self , **UpperCamelCase__ ) -> str:
'''simple docstring'''
return CLIPTokenizerFast.from_pretrained(self.tmpdirname , pad_token="""!""" , **UpperCamelCase__ )
def snake_case_ ( self , **UpperCamelCase__ ) -> List[Any]:
'''simple docstring'''
return OwlViTImageProcessor.from_pretrained(self.tmpdirname , **UpperCamelCase__ )
def snake_case_ ( self ) -> Tuple:
'''simple docstring'''
shutil.rmtree(self.tmpdirname )
def snake_case_ ( self ) -> Optional[int]:
'''simple docstring'''
A_ = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
A_ = [Image.fromarray(np.moveaxis(UpperCamelCase__ , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def snake_case_ ( self ) -> Union[str, Any]:
'''simple docstring'''
A_ = self.get_tokenizer()
A_ = self.get_rust_tokenizer()
A_ = self.get_image_processor()
A_ = OwlViTProcessor(tokenizer=UpperCamelCase__ , image_processor=UpperCamelCase__ )
processor_slow.save_pretrained(self.tmpdirname )
A_ = OwlViTProcessor.from_pretrained(self.tmpdirname , use_fast=UpperCamelCase__ )
A_ = OwlViTProcessor(tokenizer=UpperCamelCase__ , image_processor=UpperCamelCase__ )
processor_fast.save_pretrained(self.tmpdirname )
A_ = OwlViTProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer , UpperCamelCase__ )
self.assertIsInstance(processor_fast.tokenizer , UpperCamelCase__ )
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor , UpperCamelCase__ )
self.assertIsInstance(processor_fast.image_processor , UpperCamelCase__ )
def snake_case_ ( self ) -> Union[str, Any]:
'''simple docstring'''
A_ = OwlViTProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
A_ = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" )
A_ = self.get_image_processor(do_normalize=UpperCamelCase__ )
A_ = OwlViTProcessor.from_pretrained(
self.tmpdirname , bos_token="""(BOS)""" , eos_token="""(EOS)""" , do_normalize=UpperCamelCase__ )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , UpperCamelCase__ )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , UpperCamelCase__ )
def snake_case_ ( self ) -> Union[str, Any]:
'''simple docstring'''
A_ = self.get_image_processor()
A_ = self.get_tokenizer()
A_ = OwlViTProcessor(tokenizer=UpperCamelCase__ , image_processor=UpperCamelCase__ )
A_ = self.prepare_image_inputs()
A_ = image_processor(UpperCamelCase__ , return_tensors="""np""" )
A_ = processor(images=UpperCamelCase__ , return_tensors="""np""" )
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1e-2 )
def snake_case_ ( self ) -> Optional[int]:
'''simple docstring'''
A_ = self.get_image_processor()
A_ = self.get_tokenizer()
A_ = OwlViTProcessor(tokenizer=UpperCamelCase__ , image_processor=UpperCamelCase__ )
A_ = """lower newer"""
A_ = processor(text=UpperCamelCase__ , return_tensors="""np""" )
A_ = tokenizer(UpperCamelCase__ , return_tensors="""np""" )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key][0].tolist() , encoded_processor[key][0].tolist() )
def snake_case_ ( self ) -> Dict:
'''simple docstring'''
A_ = self.get_image_processor()
A_ = self.get_tokenizer()
A_ = OwlViTProcessor(tokenizer=UpperCamelCase__ , image_processor=UpperCamelCase__ )
A_ = """lower newer"""
A_ = self.prepare_image_inputs()
A_ = processor(text=UpperCamelCase__ , images=UpperCamelCase__ )
self.assertListEqual(list(inputs.keys() ) , ["""input_ids""", """attention_mask""", """pixel_values"""] )
# test if it raises when no input is passed
with pytest.raises(UpperCamelCase__ ):
processor()
def snake_case_ ( self ) -> Tuple:
'''simple docstring'''
A_ = """google/owlvit-base-patch32"""
A_ = OwlViTProcessor.from_pretrained(UpperCamelCase__ )
A_ = ["""cat""", """nasa badge"""]
A_ = processor(text=UpperCamelCase__ )
A_ = 16
self.assertListEqual(list(inputs.keys() ) , ["""input_ids""", """attention_mask"""] )
self.assertEqual(inputs["""input_ids"""].shape , (2, seq_length) )
# test if it raises when no input is passed
with pytest.raises(UpperCamelCase__ ):
processor()
def snake_case_ ( self ) -> Any:
'''simple docstring'''
A_ = """google/owlvit-base-patch32"""
A_ = OwlViTProcessor.from_pretrained(UpperCamelCase__ )
A_ = [["""cat""", """nasa badge"""], ["""person"""]]
A_ = processor(text=UpperCamelCase__ )
A_ = 16
A_ = len(UpperCamelCase__ )
A_ = max([len(UpperCamelCase__ ) for texts in input_texts] )
self.assertListEqual(list(inputs.keys() ) , ["""input_ids""", """attention_mask"""] )
self.assertEqual(inputs["""input_ids"""].shape , (batch_size * num_max_text_queries, seq_length) )
# test if it raises when no input is passed
with pytest.raises(UpperCamelCase__ ):
processor()
def snake_case_ ( self ) -> str:
'''simple docstring'''
A_ = """google/owlvit-base-patch32"""
A_ = OwlViTProcessor.from_pretrained(UpperCamelCase__ )
A_ = ["""cat""", """nasa badge"""]
A_ = processor(text=UpperCamelCase__ )
A_ = 16
A_ = inputs["""input_ids"""]
A_ = [
[49406, 2368, 49407, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[49406, 6841, 11301, 49407, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
]
self.assertListEqual(list(inputs.keys() ) , ["""input_ids""", """attention_mask"""] )
self.assertEqual(inputs["""input_ids"""].shape , (2, seq_length) )
self.assertListEqual(list(input_ids[0] ) , predicted_ids[0] )
self.assertListEqual(list(input_ids[1] ) , predicted_ids[1] )
def snake_case_ ( self ) -> int:
'''simple docstring'''
A_ = self.get_image_processor()
A_ = self.get_tokenizer()
A_ = OwlViTProcessor(tokenizer=UpperCamelCase__ , image_processor=UpperCamelCase__ )
A_ = self.prepare_image_inputs()
A_ = self.prepare_image_inputs()
A_ = processor(images=UpperCamelCase__ , query_images=UpperCamelCase__ )
self.assertListEqual(list(inputs.keys() ) , ["""query_pixel_values""", """pixel_values"""] )
# test if it raises when no input is passed
with pytest.raises(UpperCamelCase__ ):
processor()
def snake_case_ ( self ) -> Optional[Any]:
'''simple docstring'''
A_ = self.get_image_processor()
A_ = self.get_tokenizer()
A_ = OwlViTProcessor(tokenizer=UpperCamelCase__ , image_processor=UpperCamelCase__ )
A_ = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
A_ = processor.batch_decode(UpperCamelCase__ )
A_ = tokenizer.batch_decode(UpperCamelCase__ )
self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ )
| 667 |
'''simple docstring'''
import unittest
import numpy as np
import torch
from torch import nn
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModelWithProjection,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import KandinskyVaaPriorPipeline, PriorTransformer, UnCLIPScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import enable_full_determinism, skip_mps
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class A__ ( _snake_case , unittest.TestCase ):
lowercase = KandinskyVaaPriorPipeline
lowercase = ["prompt"]
lowercase = ["prompt", "negative_prompt"]
lowercase = [
"num_images_per_prompt",
"generator",
"num_inference_steps",
"latents",
"negative_prompt",
"guidance_scale",
"output_type",
"return_dict",
]
lowercase = False
@property
def snake_case_ ( self ) -> Any:
'''simple docstring'''
return 32
@property
def snake_case_ ( self ) -> Optional[int]:
'''simple docstring'''
return 32
@property
def snake_case_ ( self ) -> Optional[Any]:
'''simple docstring'''
return self.time_input_dim
@property
def snake_case_ ( self ) -> str:
'''simple docstring'''
return self.time_input_dim * 4
@property
def snake_case_ ( self ) -> int:
'''simple docstring'''
return 100
@property
def snake_case_ ( self ) -> Optional[Any]:
'''simple docstring'''
A_ = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
return tokenizer
@property
def snake_case_ ( self ) -> Union[str, Any]:
'''simple docstring'''
torch.manual_seed(0 )
A_ = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
return CLIPTextModelWithProjection(UpperCamelCase__ )
@property
def snake_case_ ( self ) -> Tuple:
'''simple docstring'''
torch.manual_seed(0 )
A_ = {
"""num_attention_heads""": 2,
"""attention_head_dim""": 12,
"""embedding_dim""": self.text_embedder_hidden_size,
"""num_layers""": 1,
}
A_ = PriorTransformer(**UpperCamelCase__ )
        # clip_std and clip_mean are initialized to 0, so PriorTransformer.post_process_latents would always return 0 - set clip_std to 1 so it doesn't
A_ = nn.Parameter(torch.ones(model.clip_std.shape ) )
return model
@property
def snake_case_ ( self ) -> str:
'''simple docstring'''
torch.manual_seed(0 )
A_ = CLIPVisionConfig(
hidden_size=self.text_embedder_hidden_size , image_size=224 , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_channels=3 , num_hidden_layers=5 , patch_size=14 , )
A_ = CLIPVisionModelWithProjection(UpperCamelCase__ )
return model
@property
def snake_case_ ( self ) -> str:
'''simple docstring'''
A_ = CLIPImageProcessor(
crop_size=224 , do_center_crop=UpperCamelCase__ , do_normalize=UpperCamelCase__ , do_resize=UpperCamelCase__ , image_mean=[0.48145466, 0.4578275, 0.40821073] , image_std=[0.26862954, 0.26130258, 0.27577711] , resample=3 , size=224 , )
return image_processor
def snake_case_ ( self ) -> Union[str, Any]:
'''simple docstring'''
A_ = self.dummy_prior
A_ = self.dummy_image_encoder
A_ = self.dummy_text_encoder
A_ = self.dummy_tokenizer
A_ = self.dummy_image_processor
A_ = UnCLIPScheduler(
variance_type="""fixed_small_log""" , prediction_type="""sample""" , num_train_timesteps=1000 , clip_sample=UpperCamelCase__ , clip_sample_range=10.0 , )
A_ = {
"""prior""": prior,
"""image_encoder""": image_encoder,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""scheduler""": scheduler,
"""image_processor""": image_processor,
}
return components
def snake_case_ ( self , UpperCamelCase__ , UpperCamelCase__=0 ) -> Optional[int]:
'''simple docstring'''
if str(UpperCamelCase__ ).startswith("""mps""" ):
A_ = torch.manual_seed(UpperCamelCase__ )
else:
A_ = torch.Generator(device=UpperCamelCase__ ).manual_seed(UpperCamelCase__ )
A_ = {
"""prompt""": """horse""",
"""generator""": generator,
"""guidance_scale""": 4.0,
"""num_inference_steps""": 2,
"""output_type""": """np""",
}
return inputs
def snake_case_ ( self ) -> List[str]:
'''simple docstring'''
A_ = """cpu"""
A_ = self.get_dummy_components()
A_ = self.pipeline_class(**UpperCamelCase__ )
A_ = pipe.to(UpperCamelCase__ )
pipe.set_progress_bar_config(disable=UpperCamelCase__ )
A_ = pipe(**self.get_dummy_inputs(UpperCamelCase__ ) )
A_ = output.image_embeds
A_ = pipe(
**self.get_dummy_inputs(UpperCamelCase__ ) , return_dict=UpperCamelCase__ , )[0]
A_ = image[0, -10:]
A_ = image_from_tuple[0, -10:]
assert image.shape == (1, 32)
A_ = np.array(
[-0.0532, 1.7120, 0.3656, -1.0852, -0.8946, -1.1756, 0.4348, 0.2482, 0.5146, -0.1156] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
@skip_mps
def snake_case_ ( self ) -> int:
'''simple docstring'''
A_ = torch_device == """cpu"""
A_ = True
A_ = False
self._test_inference_batch_single_identical(
test_max_difference=UpperCamelCase__ , relax_max_difference=UpperCamelCase__ , test_mean_pixel_difference=UpperCamelCase__ , )
@skip_mps
def snake_case_ ( self ) -> Union[str, Any]:
'''simple docstring'''
A_ = torch_device == """cpu"""
A_ = False
self._test_attention_slicing_forward_pass(
test_max_difference=UpperCamelCase__ , test_mean_pixel_difference=UpperCamelCase__ , )
| 667 | 1 |
'''simple docstring'''
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class A__ ( _snake_case ):
lowercase = ["image_processor", "tokenizer"]
lowercase = "BlipImageProcessor"
lowercase = ("BertTokenizer", "BertTokenizerFast")
def __init__( self , UpperCamelCase__ , UpperCamelCase__ ) -> Optional[int]:
'''simple docstring'''
A_ = False
super().__init__(UpperCamelCase__ , UpperCamelCase__ )
A_ = self.image_processor
def __call__( self , UpperCamelCase__ = None , UpperCamelCase__ = None , UpperCamelCase__ = True , UpperCamelCase__ = False , UpperCamelCase__ = None , UpperCamelCase__ = None , UpperCamelCase__ = 0 , UpperCamelCase__ = None , UpperCamelCase__ = None , UpperCamelCase__ = False , UpperCamelCase__ = False , UpperCamelCase__ = False , UpperCamelCase__ = False , UpperCamelCase__ = False , UpperCamelCase__ = True , UpperCamelCase__ = None , **UpperCamelCase__ , ) -> BatchEncoding:
'''simple docstring'''
if images is None and text is None:
raise ValueError("""You have to specify either images or text.""" )
# Get only text
if images is None:
A_ = self.tokenizer
A_ = self.tokenizer(
text=UpperCamelCase__ , add_special_tokens=UpperCamelCase__ , padding=UpperCamelCase__ , truncation=UpperCamelCase__ , max_length=UpperCamelCase__ , stride=UpperCamelCase__ , pad_to_multiple_of=UpperCamelCase__ , return_attention_mask=UpperCamelCase__ , return_overflowing_tokens=UpperCamelCase__ , return_special_tokens_mask=UpperCamelCase__ , return_offsets_mapping=UpperCamelCase__ , return_token_type_ids=UpperCamelCase__ , return_length=UpperCamelCase__ , verbose=UpperCamelCase__ , return_tensors=UpperCamelCase__ , **UpperCamelCase__ , )
return text_encoding
# add pixel_values
A_ = self.image_processor(UpperCamelCase__ , return_tensors=UpperCamelCase__ )
if text is not None:
A_ = self.tokenizer(
text=UpperCamelCase__ , add_special_tokens=UpperCamelCase__ , padding=UpperCamelCase__ , truncation=UpperCamelCase__ , max_length=UpperCamelCase__ , stride=UpperCamelCase__ , pad_to_multiple_of=UpperCamelCase__ , return_attention_mask=UpperCamelCase__ , return_overflowing_tokens=UpperCamelCase__ , return_special_tokens_mask=UpperCamelCase__ , return_offsets_mapping=UpperCamelCase__ , return_token_type_ids=UpperCamelCase__ , return_length=UpperCamelCase__ , verbose=UpperCamelCase__ , return_tensors=UpperCamelCase__ , **UpperCamelCase__ , )
else:
A_ = None
if text_encoding is not None:
encoding_image_processor.update(UpperCamelCase__ )
return encoding_image_processor
def snake_case_ ( self , *UpperCamelCase__ , **UpperCamelCase__ ) -> Any:
'''simple docstring'''
return self.tokenizer.batch_decode(*UpperCamelCase__ , **UpperCamelCase__ )
def snake_case_ ( self , *UpperCamelCase__ , **UpperCamelCase__ ) -> Tuple:
'''simple docstring'''
return self.tokenizer.decode(*UpperCamelCase__ , **UpperCamelCase__ )
@property
def snake_case_ ( self ) -> str:
'''simple docstring'''
A_ = self.tokenizer.model_input_names
A_ = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
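# --- Hedged usage sketch (not in the original file) ---
# This is the BlipProcessor pattern: a BlipImageProcessor plus a BERT
# tokenizer behind one __call__. A typical invocation (checkpoint id is an
# assumed example):
# from transformers import BlipProcessor
# from PIL import Image
# processor = BlipProcessor.from_pretrained("Salesforce/blip-image-captioning-base")
# inputs = processor(images=Image.new("RGB", (384, 384)), text="a photo of", return_tensors="pt")
# print(inputs.keys())  # pixel_values, input_ids, attention_mask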
| 667 |
'''simple docstring'''
import tempfile
import torch
from diffusers import IPNDMScheduler
from .test_schedulers import SchedulerCommonTest
class A__ ( _snake_case ):
lowercase = (IPNDMScheduler,)
lowercase = (("num_inference_steps", 50),)
def snake_case_ ( self , **UpperCamelCase__ ) -> Optional[Any]:
'''simple docstring'''
A_ = {"""num_train_timesteps""": 1000}
config.update(**UpperCamelCase__ )
return config
def snake_case_ ( self , UpperCamelCase__=0 , **UpperCamelCase__ ) -> str:
'''simple docstring'''
A_ = dict(self.forward_default_kwargs )
A_ = kwargs.pop("""num_inference_steps""" , UpperCamelCase__ )
A_ = self.dummy_sample
A_ = 0.1 * sample
A_ = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
A_ = self.get_scheduler_config(**UpperCamelCase__ )
A_ = scheduler_class(**UpperCamelCase__ )
scheduler.set_timesteps(UpperCamelCase__ )
# copy over dummy past residuals
A_ = dummy_past_residuals[:]
if time_step is None:
A_ = scheduler.timesteps[len(scheduler.timesteps ) // 2]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(UpperCamelCase__ )
A_ = scheduler_class.from_pretrained(UpperCamelCase__ )
new_scheduler.set_timesteps(UpperCamelCase__ )
# copy over dummy past residuals
A_ = dummy_past_residuals[:]
A_ = scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__ ).prev_sample
A_ = new_scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
A_ = scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__ ).prev_sample
A_ = new_scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
def snake_case_ ( self ) -> Optional[int]:
'''simple docstring'''
pass
def snake_case_ ( self , UpperCamelCase__=0 , **UpperCamelCase__ ) -> str:
'''simple docstring'''
A_ = dict(self.forward_default_kwargs )
A_ = kwargs.pop("""num_inference_steps""" , UpperCamelCase__ )
A_ = self.dummy_sample
A_ = 0.1 * sample
A_ = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
A_ = self.get_scheduler_config()
A_ = scheduler_class(**UpperCamelCase__ )
scheduler.set_timesteps(UpperCamelCase__ )
# copy over dummy past residuals (must be after setting timesteps)
A_ = dummy_past_residuals[:]
if time_step is None:
A_ = scheduler.timesteps[len(scheduler.timesteps ) // 2]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(UpperCamelCase__ )
A_ = scheduler_class.from_pretrained(UpperCamelCase__ )
# copy over dummy past residuals
new_scheduler.set_timesteps(UpperCamelCase__ )
# copy over dummy past residual (must be after setting timesteps)
A_ = dummy_past_residuals[:]
A_ = scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__ ).prev_sample
A_ = new_scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
A_ = scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__ ).prev_sample
A_ = new_scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
def snake_case_ ( self , **UpperCamelCase__ ) -> Tuple:
'''simple docstring'''
A_ = self.scheduler_classes[0]
A_ = self.get_scheduler_config(**UpperCamelCase__ )
A_ = scheduler_class(**UpperCamelCase__ )
A_ = 10
A_ = self.dummy_model()
A_ = self.dummy_sample_deter
scheduler.set_timesteps(UpperCamelCase__ )
for i, t in enumerate(scheduler.timesteps ):
A_ = model(UpperCamelCase__ , UpperCamelCase__ )
A_ = scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ).prev_sample
for i, t in enumerate(scheduler.timesteps ):
A_ = model(UpperCamelCase__ , UpperCamelCase__ )
A_ = scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ).prev_sample
return sample
def snake_case_ ( self ) -> Union[str, Any]:
'''simple docstring'''
A_ = dict(self.forward_default_kwargs )
A_ = kwargs.pop("""num_inference_steps""" , UpperCamelCase__ )
for scheduler_class in self.scheduler_classes:
A_ = self.get_scheduler_config()
A_ = scheduler_class(**UpperCamelCase__ )
A_ = self.dummy_sample
A_ = 0.1 * sample
if num_inference_steps is not None and hasattr(UpperCamelCase__ , """set_timesteps""" ):
scheduler.set_timesteps(UpperCamelCase__ )
elif num_inference_steps is not None and not hasattr(UpperCamelCase__ , """set_timesteps""" ):
A_ = num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
A_ = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
A_ = dummy_past_residuals[:]
A_ = scheduler.timesteps[5]
A_ = scheduler.timesteps[6]
A_ = scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__ ).prev_sample
A_ = scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__ ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
A_ = scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__ ).prev_sample
A_ = scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__ ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
def snake_case_ ( self ) -> Any:
'''simple docstring'''
for timesteps in [100, 1000]:
self.check_over_configs(num_train_timesteps=UpperCamelCase__ , time_step=UpperCamelCase__ )
def snake_case_ ( self ) -> Any:
'''simple docstring'''
for t, num_inference_steps in zip([1, 5, 10] , [10, 50, 100] ):
self.check_over_forward(num_inference_steps=UpperCamelCase__ , time_step=UpperCamelCase__ )
def snake_case_ ( self ) -> Dict:
'''simple docstring'''
A_ = self.full_loop()
A_ = torch.mean(torch.abs(UpperCamelCase__ ) )
assert abs(result_mean.item() - 2540529 ) < 10
| 667 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_speech_available, is_torch_available
__lowerCamelCase = {
'''configuration_audio_spectrogram_transformer''': [
'''AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''ASTConfig''',
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase = [
'''AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ASTForAudioClassification''',
'''ASTModel''',
'''ASTPreTrainedModel''',
]
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase = ['''ASTFeatureExtractor''']
if TYPE_CHECKING:
from .configuration_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
ASTConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
ASTForAudioClassification,
ASTModel,
ASTPreTrainedModel,
)
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_audio_spectrogram_transformer import ASTFeatureExtractor
else:
import sys
__lowerCamelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
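# --- Hedged usage note (not in the original file) ---
# The _LazyModule replacement defers the heavy imports: accessing e.g.
# ASTModel on the package triggers the torch-backed import the first time,
# and names guarded by the availability checks above simply stay out of the
# import structure when their backend is missing.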
| 667 |
'''simple docstring'''
import argparse
import re
from flax.traverse_util import flatten_dict, unflatten_dict
from tax import checkpoints
from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration
from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model
from transformers.utils import logging
logging.set_verbosity_info()
# should not include what is already done by the `from_pt` argument
__lowerCamelCase = {
'''/attention/''': '''/0/SelfAttention/''',
'''/self_attention/''': '''/0/SelfAttention/''',
'''/encoder_decoder_attention/''': '''/1/EncDecAttention/''',
'''value''': '''v''',
'''query''': '''q''',
'''key''': '''k''',
'''out''': '''o''',
'''pre_self_attention_layer_norm''': '''0/layer_norm''',
'''pre_cross_attention_layer_norm''': '''1/layer_norm''',
'''pre_attention_layer_norm''': '''0/layer_norm''', # previously 1, but seems wrong
'''token_embedder''': '''shared''',
'''encoder_norm''': '''final_layer_norm''',
'''decoder_norm''': '''final_layer_norm''',
'''relpos_bias/rel_embedding''': '''block/0/layer/0/SelfAttention/relative_attention_bias/weight''',
'''router/router_weights/w/''': '''router/classifier/''',
'''roer/roer_weights/w/''': '''router/classifier/''',
'''logits_dense''': '''lm_head''',
}
def UpperCAmelCase__ ( UpperCAmelCase__ ) -> Tuple:
    # 1. In HF T5, we have block.{x}.layer.{y}, which corresponds to layers_{x} in
    # the original model.
A_ = list(s_dict.keys() )
for key in keys:
A_ = r""".*/layers_(\d+)"""
A_ = key
if re.match(UpperCAmelCase__, UpperCAmelCase__ ):
A_ = re.sub(r"""layers_(\d+)""", r"""block/\1/layer""", UpperCAmelCase__ )
A_ = r"""(encoder|decoder)\/"""
if re.match(UpperCAmelCase__, UpperCAmelCase__ ):
A_ = re.match(UpperCAmelCase__, UpperCAmelCase__ ).groups()
if groups[0] == "encoder":
A_ = re.sub(r"""/mlp/""", r"""/1/mlp/""", UpperCAmelCase__ )
A_ = re.sub(r"""/pre_mlp_layer_norm/""", r"""/1/layer_norm/""", UpperCAmelCase__ )
elif groups[0] == "decoder":
A_ = re.sub(r"""/mlp/""", r"""/2/mlp/""", UpperCAmelCase__ )
A_ = re.sub(r"""/pre_mlp_layer_norm/""", r"""/2/layer_norm/""", UpperCAmelCase__ )
# 2. Convert other classic mappings
for old_key, temp_key in MOE_LAYER_NAME_MAPPING.items():
if old_key in new_key:
A_ = new_key.replace(UpperCAmelCase__, UpperCAmelCase__ )
print(F'''{key} -> {new_key}''' )
A_ = s_dict.pop(UpperCAmelCase__ )
if "encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict:
A_ = s_dict[
"""encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"""
].T
if "decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict:
A_ = s_dict[
"""decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"""
].T
# 3. Take extra care of the EXPERTS layer
for key in list(s_dict.keys() ):
if "expert" in key:
A_ = s_dict[key].shape[0]
A_ = s_dict[key]
for idx in range(UpperCAmelCase__ ):
A_ = expert_weihts[idx]
                print(F'''{key} -> {key.replace("expert/", F"experts/expert_{idx}/" )}''' )
s_dict.pop(UpperCAmelCase__ )
return s_dict
__lowerCamelCase = {
'''NUM_ENCODER_LAYERS''': '''num_layers''',
'''NUM_DECODER_LAYERS''': '''num_decoder_layers''',
'''NUM_HEADS''': '''num_heads''',
'''HEAD_DIM''': '''d_kv''',
'''EMBED_DIM''': '''d_model''',
'''MLP_DIM''': '''d_ff''',
'''NUM_SELECTED_EXPERTS''': '''num_selected_experts''',
'''NUM_ENCODER_SPARSE_LAYERS''': '''num_sparse_encoder_layers''',
'''NUM_DECODER_SPARSE_LAYERS''': '''num_sparse_decoder_layers''',
'''dense.MlpBlock.activations''': '''feed_forward_proj''',
}
def UpperCAmelCase__ ( UpperCAmelCase__, UpperCAmelCase__ ) -> List[Any]:
    # Convert a Google-style config to the Hugging Face format
import regex as re
with open(UpperCAmelCase__, """r""" ) as f:
A_ = f.read()
A_ = re.findall(r"""(.*) = ([0-9.]*)""", UpperCAmelCase__ )
A_ = {}
for param, value in regex_match:
if param in GIN_TO_CONFIG_MAPPING and value != "":
A_ = float(UpperCAmelCase__ ) if """.""" in value else int(UpperCAmelCase__ )
A_ = re.findall(r"""(.*activations) = \(\'(.*)\',\)""", UpperCAmelCase__ )[0]
A_ = str(activation[1] )
A_ = num_experts
A_ = SwitchTransformersConfig(**UpperCAmelCase__ )
return config
def UpperCAmelCase__ ( UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__=None, UpperCAmelCase__="./", UpperCAmelCase__=8 ) -> List[str]:
# Initialise PyTorch model
print(F'''Loading flax weights from : {flax_checkpoint_path}''' )
A_ = checkpoints.load_tax_checkpoint(UpperCAmelCase__ )
if gin_file is not None:
A_ = convert_gin_to_config(UpperCAmelCase__, UpperCAmelCase__ )
else:
A_ = SwitchTransformersConfig.from_pretrained(UpperCAmelCase__ )
A_ = SwitchTransformersForConditionalGeneration(UpperCAmelCase__ )
A_ = flax_params["""target"""]
A_ = flatten_dict(UpperCAmelCase__, sep="""/""" )
A_ = rename_keys(UpperCAmelCase__ )
A_ = unflatten_dict(UpperCAmelCase__, sep="""/""" )
# Load the flax params in the PT model
load_flax_weights_in_pytorch_model(UpperCAmelCase__, UpperCAmelCase__ )
print(F'''Save PyTorch model to {pytorch_dump_path}''' )
pt_model.save_pretrained(UpperCAmelCase__ )
if __name__ == "__main__":
__lowerCamelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--switch_t5x_checkpoint_path''',
default=None,
type=str,
required=True,
help=(
'''The config json file corresponding to the pre-trained SwitchTransformers model. \nThis specifies the'''
''' model architecture. If not provided, a `gin_file` has to be provided.'''
),
)
parser.add_argument(
'''--gin_file''',
default=None,
type=str,
required=False,
help='''Path to the gin config file. If not provided, a `config_file` has to be passed ''',
)
parser.add_argument(
'''--config_name''', default=None, type=str, required=False, help='''Config name of SwitchTransformers model.'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output pytorch model.'''
)
parser.add_argument('''--num_experts''', default=8, type=int, required=False, help='''Number of experts''')
__lowerCamelCase = parser.parse_args()
convert_flax_checkpoint_to_pytorch(
args.switch_tax_checkpoint_path,
args.config_name,
args.gin_file,
args.pytorch_dump_folder_path,
args.num_experts,
)
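# --- Hedged invocation example (not in the original file) ---
# Flags match the argparse definitions above; paths are placeholders and the
# script name is assumed from the transformers repo.
# python convert_switch_transformers_original_flax_checkpoint_to_pytorch.py \
#     --switch_t5x_checkpoint_path /path/to/t5x/checkpoint \
#     --gin_file /path/to/config.gin \
#     --pytorch_dump_folder_path /path/to/output \
#     --num_experts 8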
| 667 | 1 |
'''simple docstring'''
import unittest
from knapsack import knapsack as k
class A__ ( unittest.TestCase ):
def snake_case_ ( self ) -> Any:
'''simple docstring'''
A_ = 0
A_ = [0]
A_ = [0]
A_ = len(UpperCamelCase__ )
self.assertEqual(k.knapsack(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) , 0 )
A_ = [60]
A_ = [10]
A_ = len(UpperCamelCase__ )
self.assertEqual(k.knapsack(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) , 0 )
def snake_case_ ( self ) -> Optional[int]:
'''simple docstring'''
A_ = 3
A_ = [1, 2, 3]
A_ = [3, 2, 1]
A_ = len(UpperCamelCase__ )
self.assertEqual(k.knapsack(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) , 5 )
def snake_case_ ( self ) -> Dict:
'''simple docstring'''
A_ = 50
A_ = [60, 100, 120]
A_ = [10, 20, 30]
A_ = len(UpperCamelCase__ )
self.assertEqual(k.knapsack(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) , 220 )
if __name__ == "__main__":
unittest.main()
| 667 |
'''simple docstring'''
def UpperCAmelCase__ ( UpperCAmelCase__ ) -> int:
assert (
isinstance(UpperCAmelCase__, UpperCAmelCase__ ) and number_of_steps > 0
    ), F'''number_of_steps needs to be a positive integer, your input was {number_of_steps}'''
if number_of_steps == 1:
return 1
A_ , A_ = 1, 1
for _ in range(number_of_steps - 1 ):
A_ , A_ = current + previous, current
return current
if __name__ == "__main__":
import doctest
doctest.testmod()
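# --- Hedged demo (not in the original file) ---
# The loop is the two-variable Fibonacci recurrence: for 4 steps there are 5
# ways (1+1+1+1, 1+1+2, 1+2+1, 2+1+1, 2+2). The def above is scrubbed;
# climb_stairs is an assumed name.
# assert climb_stairs(4) == 5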
| 667 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__lowerCamelCase = {'''configuration_vit_msn''': ['''VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ViTMSNConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase = [
'''VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ViTMSNModel''',
'''ViTMSNForImageClassification''',
'''ViTMSNPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_vit_msn import VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMSNConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit_msn import (
VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTMSNForImageClassification,
ViTMSNModel,
ViTMSNPreTrainedModel,
)
else:
import sys
__lowerCamelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 667 |
'''simple docstring'''
def UpperCAmelCase__ ( UpperCAmelCase__ ) -> bool:
return str(UpperCAmelCase__ ) == str(UpperCAmelCase__ )[::-1]
def UpperCAmelCase__ ( UpperCAmelCase__ ) -> int:
return int(UpperCAmelCase__ ) + int(str(UpperCAmelCase__ )[::-1] )
def UpperCAmelCase__ ( UpperCAmelCase__ = 1_00_00 ) -> int:
A_ = []
for num in range(1, UpperCAmelCase__ ):
A_ = 0
A_ = num
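        # Project Euler 55: assume a number is Lychrel if 50 reverse-and-add
        # steps (e.g. 349 -> 349 + 943 = 1292) never produce a palindrome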
while iterations < 50:
A_ = sum_reverse(UpperCAmelCase__ )
iterations += 1
if is_palindrome(UpperCAmelCase__ ):
break
else:
lychrel_nums.append(UpperCAmelCase__ )
return len(UpperCAmelCase__ )
if __name__ == "__main__":
print(f"""{solution() = }""")
| 667 | 1 |
'''simple docstring'''
def UpperCAmelCase__ ( UpperCAmelCase__ ) -> "list[int]":
if upper_limit < 0:
raise ValueError("""Limit for the Catalan sequence must be ≥ 0""" )
A_ = [0] * (upper_limit + 1)
# Base case: C(0) = C(1) = 1
A_ = 1
if upper_limit > 0:
A_ = 1
# Recurrence relation: C(i) = sum(C(j).C(i-j-1)), from j = 0 to i
for i in range(2, upper_limit + 1 ):
for j in range(UpperCAmelCase__ ):
catalan_list[i] += catalan_list[j] * catalan_list[i - j - 1]
return catalan_list
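# e.g. catalan_numbers(5) returns [1, 1, 2, 5, 14, 42]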
if __name__ == "__main__":
print('''\n********* Catalan Numbers Using Dynamic Programming ************\n''')
print('''\n*** Enter -1 at any time to quit ***''')
print('''\nEnter the upper limit (≥ 0) for the Catalan number sequence: ''', end='''''')
try:
while True:
__lowerCamelCase = int(input().strip())
if N < 0:
print('''\n********* Goodbye!! ************''')
break
else:
print(f"""The Catalan numbers from 0 through {N} are:""")
print(catalan_numbers(N))
print('''Try another upper limit for the sequence: ''', end='''''')
except (NameError, ValueError):
print('''\n********* Invalid input, goodbye! ************\n''')
import doctest
doctest.testmod()
| 667 |
'''simple docstring'''
import argparse
import json
from typing import List
from ltp import LTP
from transformers.models.bert.tokenization_bert import BertTokenizer
def UpperCAmelCase__ ( UpperCAmelCase__ ) -> Optional[int]:
# This defines a "chinese character" as anything in the CJK Unicode block:
# https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
#
# Note that the CJK Unicode block is NOT all Japanese and Korean characters,
# despite its name. The modern Korean Hangul alphabet is a different block,
# as is Japanese Hiragana and Katakana. Those alphabets are used to write
# space-separated words, so they are not treated specially and handled
    # like all of the other languages.
if (
(cp >= 0X4E_00 and cp <= 0X9F_FF)
or (cp >= 0X34_00 and cp <= 0X4D_BF) #
or (cp >= 0X2_00_00 and cp <= 0X2_A6_DF) #
or (cp >= 0X2_A7_00 and cp <= 0X2_B7_3F) #
or (cp >= 0X2_B7_40 and cp <= 0X2_B8_1F) #
or (cp >= 0X2_B8_20 and cp <= 0X2_CE_AF) #
or (cp >= 0XF9_00 and cp <= 0XFA_FF)
or (cp >= 0X2_F8_00 and cp <= 0X2_FA_1F) #
): #
return True
return False
def UpperCAmelCase__ ( UpperCAmelCase__ ) -> List[Any]:
    # return 1 only when every character of the word is a CJK character (e.g. '身高' or '神'); a word like '180' yields 0
for char in word:
A_ = ord(UpperCAmelCase__ )
if not _is_chinese_char(UpperCAmelCase__ ):
return 0
return 1
def UpperCAmelCase__ ( UpperCAmelCase__ ) -> int:
A_ = set()
for token in tokens:
A_ = len(UpperCAmelCase__ ) > 1 and is_chinese(UpperCAmelCase__ )
if chinese_word:
word_set.add(UpperCAmelCase__ )
A_ = list(UpperCAmelCase__ )
return word_list
def UpperCAmelCase__ ( UpperCAmelCase__, UpperCAmelCase__ ) -> Dict:
if not chinese_word_set:
return bert_tokens
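    # Greedy longest-match: at each position, try to match the longest
    # LTP-segmented word; tokens after the first inside a match get a "##"
    # prefix so that whole-word masking treats the word as one unit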
A_ = max([len(UpperCAmelCase__ ) for w in chinese_word_set] )
A_ = bert_tokens
A_ , A_ = 0, len(UpperCAmelCase__ )
while start < end:
A_ = True
if is_chinese(bert_word[start] ):
A_ = min(end - start, UpperCAmelCase__ )
for i in range(UpperCAmelCase__, 1, -1 ):
A_ = """""".join(bert_word[start : start + i] )
if whole_word in chinese_word_set:
for j in range(start + 1, start + i ):
A_ = """##""" + bert_word[j]
A_ = start + i
A_ = False
break
if single_word:
start += 1
return bert_word
def UpperCAmelCase__ ( UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__ ) -> Dict:
A_ = []
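    # run LTP word segmentation ("cws") over the corpus in batches of 100 lines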
for i in range(0, len(UpperCAmelCase__ ), 1_00 ):
A_ = ltp_tokenizer.pipeline(lines[i : i + 1_00], tasks=["""cws"""] ).cws
A_ = [get_chinese_word(UpperCAmelCase__ ) for r in res]
ltp_res.extend(UpperCAmelCase__ )
assert len(UpperCAmelCase__ ) == len(UpperCAmelCase__ )
A_ = []
for i in range(0, len(UpperCAmelCase__ ), 1_00 ):
A_ = bert_tokenizer(lines[i : i + 1_00], add_special_tokens=UpperCAmelCase__, truncation=UpperCAmelCase__, max_length=5_12 )
bert_res.extend(res["""input_ids"""] )
assert len(UpperCAmelCase__ ) == len(UpperCAmelCase__ )
A_ = []
for input_ids, chinese_word in zip(UpperCAmelCase__, UpperCAmelCase__ ):
A_ = []
for id in input_ids:
A_ = bert_tokenizer._convert_id_to_token(UpperCAmelCase__ )
input_tokens.append(UpperCAmelCase__ )
A_ = add_sub_symbol(UpperCAmelCase__, UpperCAmelCase__ )
A_ = []
        # We only save positions of Chinese subwords starting with ##, which means they are part of a whole word.
for i, token in enumerate(UpperCAmelCase__ ):
if token[:2] == "##":
A_ = token[2:]
# save chinese tokens' pos
if len(UpperCAmelCase__ ) == 1 and _is_chinese_char(ord(UpperCAmelCase__ ) ):
ref_id.append(UpperCAmelCase__ )
ref_ids.append(UpperCAmelCase__ )
assert len(UpperCAmelCase__ ) == len(UpperCAmelCase__ )
return ref_ids
def UpperCAmelCase__ ( UpperCAmelCase__ ) -> Optional[Any]:
# For Chinese (Ro)Bert, the best result is from : RoBERTa-wwm-ext (https://github.com/ymcui/Chinese-BERT-wwm)
# If we want to fine-tune these model, we have to use same tokenizer : LTP (https://github.com/HIT-SCIR/ltp)
with open(args.file_name, """r""", encoding="""utf-8""" ) as f:
A_ = f.readlines()
A_ = [line.strip() for line in data if len(UpperCAmelCase__ ) > 0 and not line.isspace()] # avoid delimiter like '\u2029'
A_ = LTP(args.ltp ) # faster in GPU device
A_ = BertTokenizer.from_pretrained(args.bert )
A_ = prepare_ref(UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__ )
with open(args.save_path, """w""", encoding="""utf-8""" ) as f:
A_ = [json.dumps(UpperCAmelCase__ ) + """\n""" for ref in ref_ids]
f.writelines(UpperCAmelCase__ )
if __name__ == "__main__":
__lowerCamelCase = argparse.ArgumentParser(description='''prepare_chinese_ref''')
parser.add_argument(
'''--file_name''',
required=False,
type=str,
default='''./resources/chinese-demo.txt''',
help='''file need process, same as training data in lm''',
)
parser.add_argument(
'''--ltp''',
required=False,
type=str,
default='''./resources/ltp''',
help='''resources for LTP tokenizer, usually a path''',
)
parser.add_argument(
'''--bert''',
required=False,
type=str,
default='''./resources/robert''',
help='''resources for Bert tokenizer''',
)
parser.add_argument(
'''--save_path''',
required=False,
type=str,
default='''./resources/ref.txt''',
help='''path to save res''',
)
__lowerCamelCase = parser.parse_args()
main(args)
| 667 | 1 |
'''simple docstring'''
import math
from collections.abc import Iterator
from itertools import takewhile
def UpperCAmelCase__ ( UpperCAmelCase__ ) -> bool:
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
# All primes number are in format of 6k +/- 1
for i in range(5, int(math.sqrt(UpperCAmelCase__ ) + 1 ), 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
def UpperCAmelCase__ ( ) -> Iterator[int]:
A_ = 2
while True:
if is_prime(UpperCAmelCase__ ):
yield num
num += 1
def UpperCAmelCase__ ( UpperCAmelCase__ = 2_00_00_00 ) -> int:
    return sum(takewhile(lambda x: x < UpperCAmelCase__, prime_generator() ) )
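# Project Euler 10: the sum of all primes below two million is 142913828922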
if __name__ == "__main__":
print(f"""{solution() = }""")
| 667 |
'''simple docstring'''
from __future__ import annotations
import math
def UpperCAmelCase__ ( UpperCAmelCase__ ) -> bool:
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
# All primes number are in format of 6k +/- 1
for i in range(5, int(math.sqrt(UpperCAmelCase__ ) + 1 ), 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
__lowerCamelCase = [num for num in range(3, 10_0001, 2) if not is_prime(num)]
def UpperCAmelCase__ ( UpperCAmelCase__ ) -> list[int]:
if not isinstance(UpperCAmelCase__, UpperCAmelCase__ ):
raise ValueError("""n must be an integer""" )
if n <= 0:
raise ValueError("""n must be >= 0""" )
A_ = []
for num in range(len(UpperCAmelCase__ ) ):
A_ = 0
while 2 * i * i <= odd_composites[num]:
A_ = odd_composites[num] - 2 * i * i
if is_prime(UpperCAmelCase__ ):
break
i += 1
else:
list_nums.append(odd_composites[num] )
if len(UpperCAmelCase__ ) == n:
return list_nums
return []
def UpperCAmelCase__ ( ) -> int:
return compute_nums(1 )[0]
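# Project Euler 46: the smallest odd composite that cannot be written as the
# sum of a prime and twice a square is 5777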
if __name__ == "__main__":
print(f"""{solution() = }""")
| 667 | 1 |
'''simple docstring'''
import subprocess
import sys
from transformers import BertConfig, BertModel, BertTokenizer, pipeline
from transformers.testing_utils import TestCasePlus, require_torch
class A__ ( _snake_case ):
@require_torch
def snake_case_ ( self ) -> Dict:
'''simple docstring'''
# this test is a bit tricky since TRANSFORMERS_OFFLINE can only be changed before
# `transformers` is loaded, and it's too late for inside pytest - so we are changing it
# while running an external program
# python one-liner segments
# this must be loaded before socket.socket is monkey-patched
A_ = """
from transformers import BertConfig, BertModel, BertTokenizer, pipeline
"""
A_ = """
mname = \"hf-internal-testing/tiny-random-bert\"
BertConfig.from_pretrained(mname)
BertModel.from_pretrained(mname)
BertTokenizer.from_pretrained(mname)
pipe = pipeline(task=\"fill-mask\", model=mname)
print(\"success\")
"""
A_ = """
import socket
def offline_socket(*args, **kwargs): raise RuntimeError(\"Offline mode is enabled, we shouldn't access internet\")
socket.socket = offline_socket
"""
# Force fetching the files so that we can use the cache
A_ = """hf-internal-testing/tiny-random-bert"""
BertConfig.from_pretrained(UpperCamelCase__ )
BertModel.from_pretrained(UpperCamelCase__ )
BertTokenizer.from_pretrained(UpperCamelCase__ )
pipeline(task="""fill-mask""" , model=UpperCamelCase__ )
# baseline - just load from_pretrained with normal network
A_ = [sys.executable, """-c""", """\n""".join([load, run, mock] )]
# should succeed
A_ = self.get_env()
# should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
A_ = """1"""
A_ = subprocess.run(UpperCamelCase__ , env=UpperCamelCase__ , check=UpperCamelCase__ , capture_output=UpperCamelCase__ )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn("""success""" , result.stdout.decode() )
@require_torch
def snake_case_ ( self ) -> str:
'''simple docstring'''
# python one-liner segments
# this must be loaded before socket.socket is monkey-patched
A_ = """
from transformers import BertConfig, BertModel, BertTokenizer, pipeline
"""
A_ = """
mname = \"hf-internal-testing/tiny-random-bert\"
BertConfig.from_pretrained(mname)
BertModel.from_pretrained(mname)
BertTokenizer.from_pretrained(mname)
pipe = pipeline(task=\"fill-mask\", model=mname)
print(\"success\")
"""
A_ = """
import socket
def offline_socket(*args, **kwargs): raise socket.error(\"Faking flaky internet\")
socket.socket = offline_socket
"""
# Force fetching the files so that we can use the cache
A_ = """hf-internal-testing/tiny-random-bert"""
BertConfig.from_pretrained(UpperCamelCase__ )
BertModel.from_pretrained(UpperCamelCase__ )
BertTokenizer.from_pretrained(UpperCamelCase__ )
pipeline(task="""fill-mask""" , model=UpperCamelCase__ )
# baseline - just load from_pretrained with normal network
A_ = [sys.executable, """-c""", """\n""".join([load, run, mock] )]
# should succeed
A_ = self.get_env()
A_ = subprocess.run(UpperCamelCase__ , env=UpperCamelCase__ , check=UpperCamelCase__ , capture_output=UpperCamelCase__ )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn("""success""" , result.stdout.decode() )
@require_torch
def snake_case_ ( self ) -> Optional[int]:
'''simple docstring'''
# this test is a bit tricky since TRANSFORMERS_OFFLINE can only be changed before
# `transformers` is loaded, and it's too late for inside pytest - so we are changing it
# while running an external program
# python one-liner segments
# this must be loaded before socket.socket is monkey-patched
A_ = """
from transformers import BertConfig, BertModel, BertTokenizer
"""
A_ = """
mname = \"hf-internal-testing/tiny-random-bert-sharded\"
BertConfig.from_pretrained(mname)
BertModel.from_pretrained(mname)
print(\"success\")
"""
A_ = """
import socket
def offline_socket(*args, **kwargs): raise ValueError(\"Offline mode is enabled\")
socket.socket = offline_socket
"""
# baseline - just load from_pretrained with normal network
A_ = [sys.executable, """-c""", """\n""".join([load, run] )]
# should succeed
A_ = self.get_env()
A_ = subprocess.run(UpperCamelCase__ , env=UpperCamelCase__ , check=UpperCamelCase__ , capture_output=UpperCamelCase__ )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn("""success""" , result.stdout.decode() )
# next emulate no network
A_ = [sys.executable, """-c""", """\n""".join([load, mock, run] )]
# Doesn't fail anymore since the model is in the cache due to other tests, so commenting this.
# env["TRANSFORMERS_OFFLINE"] = "0"
# result = subprocess.run(cmd, env=env, check=False, capture_output=True)
# self.assertEqual(result.returncode, 1, result.stderr)
# should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
A_ = """1"""
A_ = subprocess.run(UpperCamelCase__ , env=UpperCamelCase__ , check=UpperCamelCase__ , capture_output=UpperCamelCase__ )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn("""success""" , result.stdout.decode() )
@require_torch
def snake_case_ ( self ) -> List[str]:
'''simple docstring'''
A_ = """
from transformers import pipeline
"""
A_ = """
mname = \"hf-internal-testing/tiny-random-bert\"
pipe = pipeline(model=mname)
"""
A_ = """
import socket
def offline_socket(*args, **kwargs): raise socket.error(\"Offline mode is enabled\")
socket.socket = offline_socket
"""
A_ = self.get_env()
A_ = """1"""
A_ = [sys.executable, """-c""", """\n""".join([load, mock, run] )]
A_ = subprocess.run(UpperCamelCase__ , env=UpperCamelCase__ , check=UpperCamelCase__ , capture_output=UpperCamelCase__ )
self.assertEqual(result.returncode , 1 , result.stderr )
self.assertIn(
"""You cannot infer task automatically within `pipeline` when using offline mode""" , result.stderr.decode().replace("""\n""" , """""" ) , )
@require_torch
def snake_case_ ( self ) -> Optional[int]:
'''simple docstring'''
A_ = """
from transformers import AutoModel
"""
A_ = """
mname = \"hf-internal-testing/test_dynamic_model\"
AutoModel.from_pretrained(mname, trust_remote_code=True)
print(\"success\")
"""
# baseline - just load from_pretrained with normal network
A_ = [sys.executable, """-c""", """\n""".join([load, run] )]
# should succeed
A_ = self.get_env()
A_ = subprocess.run(UpperCamelCase__ , env=UpperCamelCase__ , check=UpperCamelCase__ , capture_output=UpperCamelCase__ )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn("""success""" , result.stdout.decode() )
# should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
A_ = """1"""
A_ = subprocess.run(UpperCamelCase__ , env=UpperCamelCase__ , check=UpperCamelCase__ , capture_output=UpperCamelCase__ )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn("""success""" , result.stdout.decode() )
| 667 |
'''simple docstring'''
def UpperCAmelCase__ ( UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__ = 0, UpperCAmelCase__ = 0 ) -> int:
A_ = right or len(UpperCAmelCase__ ) - 1
if left > right:
return -1
elif list_data[left] == key:
return left
elif list_data[right] == key:
return right
else:
return search(UpperCAmelCase__, UpperCAmelCase__, left + 1, right - 1 )
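# e.g. search([5, 1, 3], 3) compares both ends of the list and returns index 2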
if __name__ == "__main__":
import doctest
doctest.testmod()
| 667 | 1 |
'''simple docstring'''
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto.configuration_auto import CONFIG_MAPPING
__lowerCamelCase = logging.get_logger(__name__)
class A__ ( _snake_case ):
lowercase = "upernet"
def __init__( self , UpperCamelCase__=None , UpperCamelCase__=512 , UpperCamelCase__=0.02 , UpperCamelCase__=[1, 2, 3, 6] , UpperCamelCase__=True , UpperCamelCase__=0.4 , UpperCamelCase__=384 , UpperCamelCase__=256 , UpperCamelCase__=1 , UpperCamelCase__=False , UpperCamelCase__=255 , **UpperCamelCase__ , ) -> Union[str, Any]:
'''simple docstring'''
super().__init__(**UpperCamelCase__ )
if backbone_config is None:
logger.info("""`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.""" )
A_ = CONFIG_MAPPING["""resnet"""](out_features=["""stage1""", """stage2""", """stage3""", """stage4"""] )
elif isinstance(UpperCamelCase__ , UpperCamelCase__ ):
A_ = backbone_config.get("""model_type""" )
A_ = CONFIG_MAPPING[backbone_model_type]
A_ = config_class.from_dict(UpperCamelCase__ )
A_ = backbone_config
A_ = hidden_size
A_ = initializer_range
A_ = pool_scales
A_ = use_auxiliary_head
A_ = auxiliary_loss_weight
A_ = auxiliary_in_channels
A_ = auxiliary_channels
A_ = auxiliary_num_convs
A_ = auxiliary_concat_input
A_ = loss_ignore_index
def snake_case_ ( self ) -> str:
'''simple docstring'''
A_ = copy.deepcopy(self.__dict__ )
A_ = self.backbone_config.to_dict()
A_ = self.__class__.model_type
return output
| 667 |
'''simple docstring'''
import os
import time
import pytest
from datasets.utils.filelock import FileLock, Timeout
def UpperCAmelCase__ ( UpperCAmelCase__ ) -> int:
A_ = FileLock(str(tmpdir / """foo.lock""" ) )
A_ = FileLock(str(tmpdir / """foo.lock""" ) )
A_ = 0.01
with locka.acquire():
with pytest.raises(UpperCAmelCase__ ):
A_ = time.time()
locka.acquire(UpperCAmelCase__ )
assert time.time() - _start > timeout
def UpperCAmelCase__ ( UpperCAmelCase__ ) -> Dict:
A_ = """a""" * 10_00 + """.lock"""
A_ = FileLock(str(tmpdir / filename ) )
assert locka._lock_file.endswith(""".lock""" )
assert not locka._lock_file.endswith(UpperCAmelCase__ )
assert len(os.path.basename(locka._lock_file ) ) <= 2_55
A_ = FileLock(tmpdir / filename )
with locka.acquire():
with pytest.raises(UpperCAmelCase__ ):
locka.acquire(0 )
| 667 | 1 |
'''simple docstring'''
import itertools
from dataclasses import dataclass
from typing import Optional
import pandas as pd
import pyarrow as pa
import datasets
from datasets.table import table_cast
@dataclass
class A__ ( datasets.BuilderConfig ):
lowercase = None
class A__ ( datasets.ArrowBasedBuilder ):
lowercase = PandasConfig
def snake_case_ ( self ) -> Union[str, Any]:
'''simple docstring'''
return datasets.DatasetInfo(features=self.config.features )
def snake_case_ ( self , UpperCamelCase__ ) -> List[Any]:
'''simple docstring'''
if not self.config.data_files:
raise ValueError(f'''At least one data file must be specified, but got data_files={self.config.data_files}''' )
A_ = dl_manager.download_and_extract(self.config.data_files )
if isinstance(UpperCamelCase__ , (str, list, tuple) ):
A_ = data_files
if isinstance(UpperCamelCase__ , UpperCamelCase__ ):
A_ = [files]
# Use `dl_manager.iter_files` to skip hidden files in an extracted archive
A_ = [dl_manager.iter_files(UpperCamelCase__ ) for file in files]
return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"""files""": files} )]
A_ = []
for split_name, files in data_files.items():
if isinstance(UpperCamelCase__ , UpperCamelCase__ ):
A_ = [files]
# Use `dl_manager.iter_files` to skip hidden files in an extracted archive
A_ = [dl_manager.iter_files(UpperCamelCase__ ) for file in files]
splits.append(datasets.SplitGenerator(name=UpperCamelCase__ , gen_kwargs={"""files""": files} ) )
return splits
def snake_case_ ( self , UpperCamelCase__ ) -> pa.Table:
'''simple docstring'''
if self.config.features is not None:
# more expensive cast to support nested features with keys in a different order
# allows str <-> int/float or str to Audio for example
A_ = table_cast(UpperCamelCase__ , self.config.features.arrow_schema )
return pa_table
def snake_case_ ( self , UpperCamelCase__ ) -> List[Any]:
'''simple docstring'''
for i, file in enumerate(itertools.chain.from_iterable(UpperCamelCase__ ) ):
with open(UpperCamelCase__ , """rb""" ) as f:
A_ = pa.Table.from_pandas(pd.read_pickle(UpperCamelCase__ ) )
yield i, self._cast_table(UpperCamelCase__ )
| 667 |
'''simple docstring'''
from dataclasses import dataclass
from typing import Optional
import numpy as np
import torch
import torch.nn as nn
from ..utils import BaseOutput, is_torch_version, randn_tensor
from .attention_processor import SpatialNorm
from .unet_ad_blocks import UNetMidBlockaD, get_down_block, get_up_block
@dataclass
class A__ ( _snake_case ):
lowercase = 42
class A__ ( nn.Module ):
def __init__( self , UpperCamelCase__=3 , UpperCamelCase__=3 , UpperCamelCase__=("DownEncoderBlock2D",) , UpperCamelCase__=(64,) , UpperCamelCase__=2 , UpperCamelCase__=32 , UpperCamelCase__="silu" , UpperCamelCase__=True , ) -> Union[str, Any]:
'''simple docstring'''
super().__init__()
A_ = layers_per_block
A_ = torch.nn.Convad(
UpperCamelCase__ , block_out_channels[0] , kernel_size=3 , stride=1 , padding=1 , )
A_ = None
A_ = nn.ModuleList([] )
# down
A_ = block_out_channels[0]
for i, down_block_type in enumerate(UpperCamelCase__ ):
A_ = output_channel
A_ = block_out_channels[i]
A_ = i == len(UpperCamelCase__ ) - 1
A_ = get_down_block(
UpperCamelCase__ , num_layers=self.layers_per_block , in_channels=UpperCamelCase__ , out_channels=UpperCamelCase__ , add_downsample=not is_final_block , resnet_eps=1e-6 , downsample_padding=0 , resnet_act_fn=UpperCamelCase__ , resnet_groups=UpperCamelCase__ , attention_head_dim=UpperCamelCase__ , temb_channels=UpperCamelCase__ , )
self.down_blocks.append(UpperCamelCase__ )
# mid
A_ = UNetMidBlockaD(
in_channels=block_out_channels[-1] , resnet_eps=1e-6 , resnet_act_fn=UpperCamelCase__ , output_scale_factor=1 , resnet_time_scale_shift="""default""" , attention_head_dim=block_out_channels[-1] , resnet_groups=UpperCamelCase__ , temb_channels=UpperCamelCase__ , )
# out
A_ = nn.GroupNorm(num_channels=block_out_channels[-1] , num_groups=UpperCamelCase__ , eps=1e-6 )
A_ = nn.SiLU()
A_ = 2 * out_channels if double_z else out_channels
A_ = nn.Convad(block_out_channels[-1] , UpperCamelCase__ , 3 , padding=1 )
A_ = False
def snake_case_ ( self , UpperCamelCase__ ) -> str:
'''simple docstring'''
A_ = x
A_ = self.conv_in(UpperCamelCase__ )
if self.training and self.gradient_checkpointing:
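            # gradient checkpointing: recompute activations during the backward
            # pass, trading extra compute for lower peak memory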
def create_custom_forward(UpperCamelCase__ ):
def custom_forward(*UpperCamelCase__ ):
return module(*UpperCamelCase__ )
return custom_forward
# down
if is_torch_version(""">=""" , """1.11.0""" ):
for down_block in self.down_blocks:
A_ = torch.utils.checkpoint.checkpoint(
create_custom_forward(UpperCamelCase__ ) , UpperCamelCase__ , use_reentrant=UpperCamelCase__ )
# middle
A_ = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) , UpperCamelCase__ , use_reentrant=UpperCamelCase__ )
else:
for down_block in self.down_blocks:
A_ = torch.utils.checkpoint.checkpoint(create_custom_forward(UpperCamelCase__ ) , UpperCamelCase__ )
# middle
A_ = torch.utils.checkpoint.checkpoint(create_custom_forward(self.mid_block ) , UpperCamelCase__ )
else:
# down
for down_block in self.down_blocks:
A_ = down_block(UpperCamelCase__ )
# middle
A_ = self.mid_block(UpperCamelCase__ )
# post-process
A_ = self.conv_norm_out(UpperCamelCase__ )
A_ = self.conv_act(UpperCamelCase__ )
A_ = self.conv_out(UpperCamelCase__ )
return sample
class A__ ( nn.Module ):
def __init__( self , UpperCamelCase__=3 , UpperCamelCase__=3 , UpperCamelCase__=("UpDecoderBlock2D",) , UpperCamelCase__=(64,) , UpperCamelCase__=2 , UpperCamelCase__=32 , UpperCamelCase__="silu" , UpperCamelCase__="group" , ) -> List[Any]:
'''simple docstring'''
super().__init__()
A_ = layers_per_block
A_ = nn.Convad(
UpperCamelCase__ , block_out_channels[-1] , kernel_size=3 , stride=1 , padding=1 , )
A_ = None
A_ = nn.ModuleList([] )
A_ = in_channels if norm_type == """spatial""" else None
# mid
A_ = UNetMidBlockaD(
in_channels=block_out_channels[-1] , resnet_eps=1e-6 , resnet_act_fn=UpperCamelCase__ , output_scale_factor=1 , resnet_time_scale_shift="""default""" if norm_type == """group""" else norm_type , attention_head_dim=block_out_channels[-1] , resnet_groups=UpperCamelCase__ , temb_channels=UpperCamelCase__ , )
# up
A_ = list(reversed(UpperCamelCase__ ) )
A_ = reversed_block_out_channels[0]
for i, up_block_type in enumerate(UpperCamelCase__ ):
A_ = output_channel
A_ = reversed_block_out_channels[i]
A_ = i == len(UpperCamelCase__ ) - 1
A_ = get_up_block(
UpperCamelCase__ , num_layers=self.layers_per_block + 1 , in_channels=UpperCamelCase__ , out_channels=UpperCamelCase__ , prev_output_channel=UpperCamelCase__ , add_upsample=not is_final_block , resnet_eps=1e-6 , resnet_act_fn=UpperCamelCase__ , resnet_groups=UpperCamelCase__ , attention_head_dim=UpperCamelCase__ , temb_channels=UpperCamelCase__ , resnet_time_scale_shift=UpperCamelCase__ , )
self.up_blocks.append(UpperCamelCase__ )
A_ = output_channel
# out
if norm_type == "spatial":
A_ = SpatialNorm(block_out_channels[0] , UpperCamelCase__ )
else:
A_ = nn.GroupNorm(num_channels=block_out_channels[0] , num_groups=UpperCamelCase__ , eps=1e-6 )
A_ = nn.SiLU()
A_ = nn.Convad(block_out_channels[0] , UpperCamelCase__ , 3 , padding=1 )
A_ = False
def snake_case_ ( self , UpperCamelCase__ , UpperCamelCase__=None ) -> Optional[Any]:
'''simple docstring'''
A_ = z
A_ = self.conv_in(UpperCamelCase__ )
A_ = next(iter(self.up_blocks.parameters() ) ).dtype
if self.training and self.gradient_checkpointing:
def create_custom_forward(UpperCamelCase__ ):
def custom_forward(*UpperCamelCase__ ):
return module(*UpperCamelCase__ )
return custom_forward
if is_torch_version(""">=""" , """1.11.0""" ):
# middle
A_ = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) , UpperCamelCase__ , UpperCamelCase__ , use_reentrant=UpperCamelCase__ )
A_ = sample.to(UpperCamelCase__ )
# up
for up_block in self.up_blocks:
A_ = torch.utils.checkpoint.checkpoint(
create_custom_forward(UpperCamelCase__ ) , UpperCamelCase__ , UpperCamelCase__ , use_reentrant=UpperCamelCase__ )
else:
# middle
A_ = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) , UpperCamelCase__ , UpperCamelCase__ )
A_ = sample.to(UpperCamelCase__ )
# up
for up_block in self.up_blocks:
A_ = torch.utils.checkpoint.checkpoint(create_custom_forward(UpperCamelCase__ ) , UpperCamelCase__ , UpperCamelCase__ )
else:
# middle
A_ = self.mid_block(UpperCamelCase__ , UpperCamelCase__ )
A_ = sample.to(UpperCamelCase__ )
# up
for up_block in self.up_blocks:
A_ = up_block(UpperCamelCase__ , UpperCamelCase__ )
# post-process
if latent_embeds is None:
A_ = self.conv_norm_out(UpperCamelCase__ )
else:
A_ = self.conv_norm_out(UpperCamelCase__ , UpperCamelCase__ )
A_ = self.conv_act(UpperCamelCase__ )
A_ = self.conv_out(UpperCamelCase__ )
return sample
class A__ ( nn.Module ):
def __init__( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__=None , UpperCamelCase__="random" , UpperCamelCase__=False , UpperCamelCase__=True ) -> str:
'''simple docstring'''
super().__init__()
A_ = n_e
A_ = vq_embed_dim
A_ = beta
A_ = legacy
A_ = nn.Embedding(self.n_e , self.vq_embed_dim )
self.embedding.weight.data.uniform_(-1.0 / self.n_e , 1.0 / self.n_e )
A_ = remap
if self.remap is not None:
self.register_buffer("""used""" , torch.tensor(np.load(self.remap ) ) )
A_ = self.used.shape[0]
A_ = unknown_index # "random" or "extra" or integer
if self.unknown_index == "extra":
A_ = self.re_embed
A_ = self.re_embed + 1
print(
f'''Remapping {self.n_e} indices to {self.re_embed} indices. '''
f'''Using {self.unknown_index} for unknown indices.''' )
else:
A_ = n_e
A_ = sane_index_shape
def snake_case_ ( self , UpperCamelCase__ ) -> Optional[Any]:
'''simple docstring'''
A_ = inds.shape
assert len(UpperCamelCase__ ) > 1
A_ = inds.reshape(ishape[0] , -1 )
A_ = self.used.to(UpperCamelCase__ )
A_ = (inds[:, :, None] == used[None, None, ...]).long()
A_ = match.argmax(-1 )
A_ = match.sum(2 ) < 1
if self.unknown_index == "random":
A_ = torch.randint(0 , self.re_embed , size=new[unknown].shape ).to(device=new.device )
else:
A_ = self.unknown_index
return new.reshape(UpperCamelCase__ )
def snake_case_ ( self , UpperCamelCase__ ) -> Dict:
'''simple docstring'''
A_ = inds.shape
assert len(UpperCamelCase__ ) > 1
A_ = inds.reshape(ishape[0] , -1 )
A_ = self.used.to(UpperCamelCase__ )
if self.re_embed > self.used.shape[0]: # extra token
A_ = 0 # simply set to zero
A_ = torch.gather(used[None, :][inds.shape[0] * [0], :] , 1 , UpperCamelCase__ )
return back.reshape(UpperCamelCase__ )
def snake_case_ ( self , UpperCamelCase__ ) -> str:
'''simple docstring'''
# reshape z -> (batch, height, width, channel) and flatten
A_ = z.permute(0 , 2 , 3 , 1 ).contiguous()
A_ = z.view(-1 , self.vq_embed_dim )
# distances from z to embeddings e_j (z - e)^2 = z^2 + e^2 - 2 e * z
A_ = torch.argmin(torch.cdist(UpperCamelCase__ , self.embedding.weight ) , dim=1 )
A_ = self.embedding(UpperCamelCase__ ).view(z.shape )
A_ = None
A_ = None
# compute loss for embedding
if not self.legacy:
A_ = self.beta * torch.mean((z_q.detach() - z) ** 2 ) + torch.mean((z_q - z.detach()) ** 2 )
else:
A_ = torch.mean((z_q.detach() - z) ** 2 ) + self.beta * torch.mean((z_q - z.detach()) ** 2 )
# preserve gradients
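        # (straight-through estimator: gradients flow from z_q back to z unchanged)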
A_ = z + (z_q - z).detach()
# reshape back to match original input shape
A_ = z_q.permute(0 , 3 , 1 , 2 ).contiguous()
if self.remap is not None:
A_ = min_encoding_indices.reshape(z.shape[0] , -1 ) # add batch axis
A_ = self.remap_to_used(UpperCamelCase__ )
A_ = min_encoding_indices.reshape(-1 , 1 ) # flatten
if self.sane_index_shape:
A_ = min_encoding_indices.reshape(z_q.shape[0] , z_q.shape[2] , z_q.shape[3] )
return z_q, loss, (perplexity, min_encodings, min_encoding_indices)
def snake_case_ ( self , UpperCamelCase__ , UpperCamelCase__ ) -> Optional[int]:
'''simple docstring'''
# shape specifying (batch, height, width, channel)
if self.remap is not None:
A_ = indices.reshape(shape[0] , -1 ) # add batch axis
A_ = self.unmap_to_all(UpperCamelCase__ )
A_ = indices.reshape(-1 ) # flatten again
# get quantized latent vectors
A_ = self.embedding(UpperCamelCase__ )
if shape is not None:
A_ = z_q.view(UpperCamelCase__ )
# reshape back to match original input shape
A_ = z_q.permute(0 , 3 , 1 , 2 ).contiguous()
return z_q
class A__ ( _snake_case ):
def __init__( self , UpperCamelCase__ , UpperCamelCase__=False ) -> Dict:
'''simple docstring'''
A_ = parameters
A_ , A_ = torch.chunk(UpperCamelCase__ , 2 , dim=1 )
A_ = torch.clamp(self.logvar , -30.0 , 20.0 )
A_ = deterministic
A_ = torch.exp(0.5 * self.logvar )
A_ = torch.exp(self.logvar )
if self.deterministic:
A_ = A_ = torch.zeros_like(
self.mean , device=self.parameters.device , dtype=self.parameters.dtype )
def snake_case_ ( self , UpperCamelCase__ = None ) -> torch.FloatTensor:
'''simple docstring'''
# make sure sample is on the same device as the parameters and has same dtype
A_ = randn_tensor(
self.mean.shape , generator=UpperCamelCase__ , device=self.parameters.device , dtype=self.parameters.dtype )
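        # reparameterization trick: x = mean + std * eps, with eps ~ N(0, I)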
A_ = self.mean + self.std * sample
return x
def snake_case_ ( self , UpperCamelCase__=None ) -> int:
'''simple docstring'''
if self.deterministic:
return torch.Tensor([0.0] )
else:
if other is None:
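                # closed form: KL(N(mu, s^2) || N(0, 1)) = 0.5 * sum(mu^2 + s^2 - 1 - log s^2)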
return 0.5 * torch.sum(torch.pow(self.mean , 2 ) + self.var - 1.0 - self.logvar , dim=[1, 2, 3] )
else:
return 0.5 * torch.sum(
torch.pow(self.mean - other.mean , 2 ) / other.var
+ self.var / other.var
- 1.0
- self.logvar
+ other.logvar , dim=[1, 2, 3] , )
def snake_case_ ( self , UpperCamelCase__ , UpperCamelCase__=[1, 2, 3] ) -> Optional[Any]:
'''simple docstring'''
if self.deterministic:
return torch.Tensor([0.0] )
A_ = np.log(2.0 * np.pi )
return 0.5 * torch.sum(logtwopi + self.logvar + torch.pow(sample - self.mean , 2 ) / self.var , dim=UpperCamelCase__ )
def snake_case_ ( self ) -> List[Any]:
'''simple docstring'''
return self.mean
| 667 | 1 |
'''simple docstring'''
from collections.abc import Callable
import numpy as np
def UpperCAmelCase__ ( UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__ ) -> np.array:
A_ = int(np.ceil((x_end - xa) / step_size ) )
A_ = np.zeros((n + 1,) )
A_ = ya
A_ = xa
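    # Heun's method (explicit trapezoidal rule): take an Euler predictor step,
    # then average the slopes at the two interval endpoints for the corrector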
for k in range(UpperCAmelCase__ ):
A_ = y[k] + step_size * ode_func(UpperCAmelCase__, y[k] )
A_ = y[k] + (
(step_size / 2) * (ode_func(UpperCAmelCase__, y[k] ) + ode_func(x + step_size, UpperCAmelCase__ ))
)
x += step_size
return y
if __name__ == "__main__":
import doctest
doctest.testmod()
| 667 |
'''simple docstring'''
import argparse
import json
import os
from collections import OrderedDict
import torch
from transformers import LukeConfig, LukeForMaskedLM, MLukeTokenizer, XLMRobertaTokenizer
from transformers.tokenization_utils_base import AddedToken
@torch.no_grad()
def UpperCAmelCase__ ( UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__ ) -> List[Any]:
# Load configuration defined in the metadata file
with open(UpperCAmelCase__ ) as metadata_file:
A_ = json.load(UpperCAmelCase__ )
A_ = LukeConfig(use_entity_aware_attention=UpperCAmelCase__, **metadata["""model_config"""] )
# Load in the weights from the checkpoint_path
A_ = torch.load(UpperCAmelCase__, map_location="""cpu""" )["""module"""]
# Load the entity vocab file
A_ = load_original_entity_vocab(UpperCAmelCase__ )
# add an entry for [MASK2]
A_ = max(entity_vocab.values() ) + 1
config.entity_vocab_size += 1
A_ = XLMRobertaTokenizer.from_pretrained(metadata["""model_config"""]["""bert_model_name"""] )
# Add special tokens to the token vocabulary for downstream tasks
A_ = AddedToken("""<ent>""", lstrip=UpperCAmelCase__, rstrip=UpperCAmelCase__ )
A_ = AddedToken("""<ent2>""", lstrip=UpperCAmelCase__, rstrip=UpperCAmelCase__ )
tokenizer.add_special_tokens({"""additional_special_tokens""": [entity_token_a, entity_token_a]} )
config.vocab_size += 2
print(F'''Saving tokenizer to {pytorch_dump_folder_path}''' )
tokenizer.save_pretrained(UpperCAmelCase__ )
with open(os.path.join(UpperCAmelCase__, """tokenizer_config.json""" ), """r""" ) as f:
A_ = json.load(UpperCAmelCase__ )
A_ = """MLukeTokenizer"""
with open(os.path.join(UpperCAmelCase__, """tokenizer_config.json""" ), """w""" ) as f:
json.dump(UpperCAmelCase__, UpperCAmelCase__ )
with open(os.path.join(UpperCAmelCase__, MLukeTokenizer.vocab_files_names["""entity_vocab_file"""] ), """w""" ) as f:
json.dump(UpperCAmelCase__, UpperCAmelCase__ )
A_ = MLukeTokenizer.from_pretrained(UpperCAmelCase__ )
# Initialize the embeddings of the special tokens
A_ = tokenizer.convert_tokens_to_ids(["""@"""] )[0]
A_ = tokenizer.convert_tokens_to_ids(["""#"""] )[0]
A_ = state_dict["""embeddings.word_embeddings.weight"""]
A_ = word_emb[ent_init_index].unsqueeze(0 )
A_ = word_emb[enta_init_index].unsqueeze(0 )
A_ = torch.cat([word_emb, ent_emb, enta_emb] )
# add special tokens for 'entity_predictions.bias'
for bias_name in ["lm_head.decoder.bias", "lm_head.bias"]:
A_ = state_dict[bias_name]
A_ = decoder_bias[ent_init_index].unsqueeze(0 )
A_ = decoder_bias[enta_init_index].unsqueeze(0 )
A_ = torch.cat([decoder_bias, ent_decoder_bias, enta_decoder_bias] )
# Initialize the query layers of the entity-aware self-attention mechanism
for layer_index in range(config.num_hidden_layers ):
for matrix_name in ["query.weight", "query.bias"]:
A_ = F'''encoder.layer.{layer_index}.attention.self.'''
A_ = state_dict[prefix + matrix_name]
A_ = state_dict[prefix + matrix_name]
A_ = state_dict[prefix + matrix_name]
# Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
A_ = state_dict["""entity_embeddings.entity_embeddings.weight"""]
A_ = entity_emb[entity_vocab["""[MASK]"""]].unsqueeze(0 )
A_ = torch.cat([entity_emb, entity_mask_emb] )
# add [MASK2] for 'entity_predictions.bias'
A_ = state_dict["""entity_predictions.bias"""]
A_ = entity_prediction_bias[entity_vocab["""[MASK]"""]].unsqueeze(0 )
A_ = torch.cat([entity_prediction_bias, entity_mask_bias] )
A_ = LukeForMaskedLM(config=UpperCAmelCase__ ).eval()
state_dict.pop("""entity_predictions.decoder.weight""" )
state_dict.pop("""lm_head.decoder.weight""" )
state_dict.pop("""lm_head.decoder.bias""" )
A_ = OrderedDict()
for key, value in state_dict.items():
if not (key.startswith("""lm_head""" ) or key.startswith("""entity_predictions""" )):
A_ = state_dict[key]
else:
A_ = state_dict[key]
A_ , A_ = model.load_state_dict(UpperCAmelCase__, strict=UpperCAmelCase__ )
if set(UpperCAmelCase__ ) != {"luke.embeddings.position_ids"}:
raise ValueError(F'''Unexpected unexpected_keys: {unexpected_keys}''' )
if set(UpperCAmelCase__ ) != {
"lm_head.decoder.weight",
"lm_head.decoder.bias",
"entity_predictions.decoder.weight",
}:
raise ValueError(F'''Unexpected missing_keys: {missing_keys}''' )
model.tie_weights()
assert (model.luke.embeddings.word_embeddings.weight == model.lm_head.decoder.weight).all()
assert (model.luke.entity_embeddings.entity_embeddings.weight == model.entity_predictions.decoder.weight).all()
# Check outputs
A_ = MLukeTokenizer.from_pretrained(UpperCAmelCase__, task="""entity_classification""" )
A_ = """ISO 639-3 uses the code fas for the dialects spoken across Iran and アフガニスタン (Afghanistan)."""
A_ = (0, 9)
A_ = tokenizer(UpperCAmelCase__, entity_spans=[span], return_tensors="""pt""" )
A_ = model(**UpperCAmelCase__ )
# Verify word hidden states
if model_size == "large":
raise NotImplementedError
else: # base
A_ = torch.Size((1, 33, 7_68) )
A_ = torch.tensor([[0.0_892, 0.0_596, -0.2_819], [0.0_134, 0.1_199, 0.0_573], [-0.0_169, 0.0_927, 0.0_644]] )
if not (outputs.last_hidden_state.shape == expected_shape):
raise ValueError(
F'''Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}''' )
if not torch.allclose(outputs.last_hidden_state[0, :3, :3], UpperCAmelCase__, atol=1e-4 ):
raise ValueError
# Verify entity hidden states
if model_size == "large":
raise NotImplementedError
else: # base
A_ = torch.Size((1, 1, 7_68) )
A_ = torch.tensor([[-0.1_482, 0.0_609, 0.0_322]] )
if not (outputs.entity_last_hidden_state.shape == expected_shape):
raise ValueError(
F'''Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is'''
F''' {expected_shape}''' )
if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3], UpperCAmelCase__, atol=1e-4 ):
raise ValueError
# Verify masked word/entity prediction
A_ = MLukeTokenizer.from_pretrained(UpperCAmelCase__ )
A_ = """Tokyo is the capital of <mask>."""
A_ = (24, 30)
A_ = tokenizer(UpperCAmelCase__, entity_spans=[span], return_tensors="""pt""" )
A_ = model(**UpperCAmelCase__ )
A_ = encoding["""input_ids"""][0].tolist()
A_ = input_ids.index(tokenizer.convert_tokens_to_ids("""<mask>""" ) )
A_ = outputs.logits[0][mask_position_id].argmax(dim=-1 )
assert "Japan" == tokenizer.decode(UpperCAmelCase__ )
A_ = outputs.entity_logits[0][0].argmax().item()
A_ = [
entity for entity, entity_id in tokenizer.entity_vocab.items() if entity_id == predicted_entity_id
]
assert [e for e in multilingual_predicted_entities if e.startswith("""en:""" )][0] == "en:Japan"
# Finally, save our PyTorch model and tokenizer
print("""Saving PyTorch model to {}""".format(UpperCAmelCase__ ) )
model.save_pretrained(UpperCAmelCase__ )
def UpperCAmelCase__ ( UpperCAmelCase__ ) -> int:
A_ = ["""[MASK]""", """[PAD]""", """[UNK]"""]
A_ = [json.loads(UpperCAmelCase__ ) for line in open(UpperCAmelCase__ )]
A_ = {}
for entry in data:
A_ = entry["""id"""]
for entity_name, language in entry["entities"]:
if entity_name in SPECIAL_TOKENS:
A_ = entity_id
break
A_ = F'''{language}:{entity_name}'''
A_ = entity_id
return new_mapping
if __name__ == "__main__":
__lowerCamelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''--checkpoint_path''', type=str, help='''Path to a pytorch_model.bin file.''')
parser.add_argument(
'''--metadata_path''', default=None, type=str, help='''Path to a metadata.json file, defining the configuration.'''
)
parser.add_argument(
'''--entity_vocab_path''',
default=None,
type=str,
help='''Path to an entity_vocab.tsv file, containing the entity vocabulary.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to where to dump the output PyTorch model.'''
)
parser.add_argument(
'''--model_size''', default='''base''', type=str, choices=['''base''', '''large'''], help='''Size of the model to be converted.'''
)
__lowerCamelCase = parser.parse_args()
convert_luke_checkpoint(
args.checkpoint_path,
args.metadata_path,
args.entity_vocab_path,
args.pytorch_dump_folder_path,
args.model_size,
)
| 667 | 1 |
'''simple docstring'''
from importlib import import_module
from .logging import get_logger
__lowerCamelCase = get_logger(__name__)
class A__ :
def __init__( self , UpperCamelCase__ , UpperCamelCase__=None ) -> List[Any]:
'''simple docstring'''
A_ = attrs or []
if module is not None:
for key in module.__dict__:
if key in attrs or not key.startswith("""__""" ):
setattr(self , UpperCamelCase__ , getattr(UpperCamelCase__ , UpperCamelCase__ ) )
A_ = module._original_module if isinstance(UpperCamelCase__ , _PatchedModuleObj ) else module
class A__ :
lowercase = []
def __init__( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__=None ) -> str:
'''simple docstring'''
A_ = obj
A_ = target
A_ = new
A_ = target.split(""".""" )[0]
A_ = {}
A_ = attrs or []
def __enter__( self ) -> List[Any]:
'''simple docstring'''
*A_ , A_ = self.target.split(""".""" )
# Patch modules:
# it's used to patch attributes of submodules like "os.path.join";
# in this case we need to patch "os" and "os.path"
for i in range(len(UpperCamelCase__ ) ):
try:
A_ = import_module(""".""".join(submodules[: i + 1] ) )
except ModuleNotFoundError:
continue
# We iterate over all the globals in self.obj in case we find "os" or "os.path"
for attr in self.obj.__dir__():
A_ = getattr(self.obj , UpperCamelCase__ )
# We don't check for the name of the global, but rather if its value *is* "os" or "os.path".
# This allows to patch renamed modules like "from os import path as ospath".
if obj_attr is submodule or (
(isinstance(UpperCamelCase__ , _PatchedModuleObj ) and obj_attr._original_module is submodule)
):
A_ = obj_attr
# patch at top level
setattr(self.obj , UpperCamelCase__ , _PatchedModuleObj(UpperCamelCase__ , attrs=self.attrs ) )
A_ = getattr(self.obj , UpperCamelCase__ )
# construct lower levels patches
for key in submodules[i + 1 :]:
setattr(UpperCamelCase__ , UpperCamelCase__ , _PatchedModuleObj(getattr(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) , attrs=self.attrs ) )
A_ = getattr(UpperCamelCase__ , UpperCamelCase__ )
# finally set the target attribute
setattr(UpperCamelCase__ , UpperCamelCase__ , self.new )
# Patch attribute itself:
# it's used for builtins like "open",
# and also to patch "os.path.join" we may also need to patch "join"
# itself if it was imported as "from os.path import join".
if submodules: # if it's an attribute of a submodule like "os.path.join"
try:
A_ = getattr(import_module(""".""".join(UpperCamelCase__ ) ) , UpperCamelCase__ )
except (AttributeError, ModuleNotFoundError):
return
# We iterate over all the globals in self.obj in case we find "os.path.join"
for attr in self.obj.__dir__():
# We don't check for the name of the global, but rather if its value *is* "os.path.join".
# This allows to patch renamed attributes like "from os.path import join as pjoin".
if getattr(self.obj , UpperCamelCase__ ) is attr_value:
A_ = getattr(self.obj , UpperCamelCase__ )
setattr(self.obj , UpperCamelCase__ , self.new )
        elif target_attr in globals()["__builtins__"]: # if it's a builtin like "open"
A_ = globals()["""__builtins__"""][target_attr]
setattr(self.obj , UpperCamelCase__ , self.new )
else:
raise RuntimeError(f'''Tried to patch attribute {target_attr} instead of a submodule.''' )
def __exit__( self , *UpperCamelCase__ ) -> Optional[int]:
'''simple docstring'''
for attr in list(self.original ):
setattr(self.obj , UpperCamelCase__ , self.original.pop(UpperCamelCase__ ) )
def snake_case_ ( self ) -> str:
'''simple docstring'''
self.__enter__()
self._active_patches.append(self )
def snake_case_ ( self ) -> Tuple:
'''simple docstring'''
try:
self._active_patches.remove(self )
except ValueError:
# If the patch hasn't been started this will fail
return None
return self.__exit__()
| 667 |
'''simple docstring'''
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class A__ ( _snake_case ):
lowercase = "ClapFeatureExtractor"
lowercase = ("RobertaTokenizer", "RobertaTokenizerFast")
def __init__( self , UpperCamelCase__ , UpperCamelCase__ ) -> Tuple:
'''simple docstring'''
super().__init__(UpperCamelCase__ , UpperCamelCase__ )
def __call__( self , UpperCamelCase__=None , UpperCamelCase__=None , UpperCamelCase__=None , **UpperCamelCase__ ) -> Tuple:
'''simple docstring'''
A_ = kwargs.pop("""sampling_rate""" , UpperCamelCase__ )
if text is None and audios is None:
raise ValueError("""You have to specify either text or audios. Both cannot be none.""" )
if text is not None:
A_ = self.tokenizer(UpperCamelCase__ , return_tensors=UpperCamelCase__ , **UpperCamelCase__ )
if audios is not None:
A_ = self.feature_extractor(
UpperCamelCase__ , sampling_rate=UpperCamelCase__ , return_tensors=UpperCamelCase__ , **UpperCamelCase__ )
if text is not None and audios is not None:
A_ = audio_features.input_features
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**UpperCamelCase__ ) , tensor_type=UpperCamelCase__ )
def snake_case_ ( self , *UpperCamelCase__ , **UpperCamelCase__ ) -> Optional[int]:
'''simple docstring'''
return self.tokenizer.batch_decode(*UpperCamelCase__ , **UpperCamelCase__ )
def snake_case_ ( self , *UpperCamelCase__ , **UpperCamelCase__ ) -> str:
'''simple docstring'''
return self.tokenizer.decode(*UpperCamelCase__ , **UpperCamelCase__ )
@property
def snake_case_ ( self ) -> int:
'''simple docstring'''
A_ = self.tokenizer.model_input_names
A_ = self.feature_extractor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + feature_extractor_input_names ) )
| 667 | 1 |
'''simple docstring'''
from __future__ import annotations
import inspect
import unittest
import numpy as np
from transformers import ResNetConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFResNetForImageClassification, TFResNetModel
from transformers.models.resnet.modeling_tf_resnet import TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class A__ :
def __init__( self , UpperCamelCase__ , UpperCamelCase__=3 , UpperCamelCase__=32 , UpperCamelCase__=3 , UpperCamelCase__=10 , UpperCamelCase__=[10, 20, 30, 40] , UpperCamelCase__=[1, 1, 2, 1] , UpperCamelCase__=True , UpperCamelCase__=True , UpperCamelCase__="relu" , UpperCamelCase__=3 , UpperCamelCase__=None , ) -> Union[str, Any]:
'''simple docstring'''
A_ = parent
A_ = batch_size
A_ = image_size
A_ = num_channels
A_ = embeddings_size
A_ = hidden_sizes
A_ = depths
A_ = is_training
A_ = use_labels
A_ = hidden_act
A_ = num_labels
A_ = scope
A_ = len(UpperCamelCase__ )
def snake_case_ ( self ) -> List[str]:
'''simple docstring'''
A_ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
A_ = None
if self.use_labels:
A_ = ids_tensor([self.batch_size] , self.num_labels )
A_ = self.get_config()
return config, pixel_values, labels
def snake_case_ ( self ) -> int:
'''simple docstring'''
return ResNetConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , image_size=self.image_size , )
def snake_case_ ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) -> int:
'''simple docstring'''
A_ = TFResNetModel(config=UpperCamelCase__ )
A_ = model(UpperCamelCase__ )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def snake_case_ ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) -> Union[str, Any]:
'''simple docstring'''
A_ = self.num_labels
A_ = TFResNetForImageClassification(UpperCamelCase__ )
A_ = model(UpperCamelCase__ , labels=UpperCamelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def snake_case_ ( self ) -> Tuple:
'''simple docstring'''
A_ = self.prepare_config_and_inputs()
A_ , A_ , A_ = config_and_inputs
A_ = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_tf
class A__ ( _snake_case , _snake_case , unittest.TestCase ):
lowercase = (TFResNetModel, TFResNetForImageClassification) if is_tf_available() else ()
lowercase = (
{"feature-extraction": TFResNetModel, "image-classification": TFResNetForImageClassification}
if is_tf_available()
else {}
)
lowercase = False
lowercase = False
lowercase = False
lowercase = False
lowercase = False
def snake_case_ ( self ) -> Union[str, Any]:
'''simple docstring'''
A_ = TFResNetModelTester(self )
A_ = ConfigTester(self , config_class=UpperCamelCase__ , has_text_modality=UpperCamelCase__ )
def snake_case_ ( self ) -> Optional[Any]:
'''simple docstring'''
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def snake_case_ ( self ) -> Any:
'''simple docstring'''
return
@unittest.skip(reason="""ResNet does not use inputs_embeds""" )
def snake_case_ ( self ) -> int:
'''simple docstring'''
pass
@unittest.skip(reason="""ResNet does not support input and output embeddings""" )
def snake_case_ ( self ) -> List[Any]:
'''simple docstring'''
pass
def snake_case_ ( self ) -> Any:
'''simple docstring'''
A_ , A_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A_ = model_class(UpperCamelCase__ )
A_ = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
A_ = [*signature.parameters.keys()]
A_ = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , UpperCamelCase__ )
def snake_case_ ( self ) -> str:
'''simple docstring'''
A_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCamelCase__ )
def snake_case_ ( self ) -> Optional[Any]:
'''simple docstring'''
def check_hidden_states_output(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
A_ = model_class(UpperCamelCase__ )
A_ = model(**self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) )
A_ = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
A_ = self.model_tester.num_stages
self.assertEqual(len(UpperCamelCase__ ) , expected_num_stages + 1 )
# ResNet's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
A_ , A_ = self.model_tester.prepare_config_and_inputs_for_common()
A_ = ["""basic""", """bottleneck"""]
for model_class in self.all_model_classes:
for layer_type in layers_type:
A_ = layer_type
A_ = True
check_hidden_states_output(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
A_ = True
check_hidden_states_output(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
def snake_case_ ( self ) -> Any:
'''simple docstring'''
A_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*UpperCamelCase__ )
@slow
def snake_case_ ( self ) -> List[Any]:
'''simple docstring'''
for model_name in TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A_ = TFResNetModel.from_pretrained(UpperCamelCase__ )
self.assertIsNotNone(UpperCamelCase__ )
def UpperCAmelCase__ ( ) -> str:
A_ = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_tf
@require_vision
class A__ ( unittest.TestCase ):
@cached_property
def snake_case_ ( self ) -> List[str]:
'''simple docstring'''
return (
AutoImageProcessor.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
if is_vision_available()
else None
)
@slow
def snake_case_ ( self ) -> List[str]:
'''simple docstring'''
A_ = TFResNetForImageClassification.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
A_ = self.default_image_processor
A_ = prepare_img()
A_ = image_processor(images=UpperCamelCase__ , return_tensors="""tf""" )
# forward pass
A_ = model(**UpperCamelCase__ )
# verify the logits
A_ = tf.TensorShape((1, 1000) )
self.assertEqual(outputs.logits.shape , UpperCamelCase__ )
A_ = tf.constant([-11.1069, -9.7877, -8.3777] )
self.assertTrue(np.allclose(outputs.logits[0, :3].numpy() , UpperCamelCase__ , atol=1e-4 ) )
| 667 |
'''simple docstring'''
import numpy as np
from cva import COLOR_BGR2GRAY, cvtColor, imread
from numpy import array, uinta
from PIL import Image
from digital_image_processing import change_contrast as cc
from digital_image_processing import convert_to_negative as cn
from digital_image_processing import sepia as sp
from digital_image_processing.dithering import burkes as bs
from digital_image_processing.edge_detection import canny
from digital_image_processing.filters import convolve as conv
from digital_image_processing.filters import gaussian_filter as gg
from digital_image_processing.filters import local_binary_pattern as lbp
from digital_image_processing.filters import median_filter as med
from digital_image_processing.filters import sobel_filter as sob
from digital_image_processing.resize import resize as rs
__lowerCamelCase = imread(r'''digital_image_processing/image_data/lena_small.jpg''')
__lowerCamelCase = cvtColor(img, COLOR_BGR2GRAY)
def UpperCAmelCase__ ( ) -> Dict:
A_ = cn.convert_to_negative(UpperCAmelCase__ )
# assert negative_img array for at least one True
    # the negative image should contain at least one non-zero value
def UpperCAmelCase__ ( ) -> List[Any]:
with Image.open("""digital_image_processing/image_data/lena_small.jpg""" ) as img:
        # Compare against the string representation, since the repr ends with a memory address
assert str(cc.change_contrast(UpperCAmelCase__, 1_10 ) ).startswith(
"""<PIL.Image.Image image mode=RGB size=100x100 at""" )
def UpperCAmelCase__ ( ) -> str:
A_ = canny.gen_gaussian_kernel(9, sigma=1.4 )
    # every value of the Gaussian kernel should be non-zero
assert resp.all()
def UpperCAmelCase__ ( ) -> Union[str, Any]:
A_ = imread("""digital_image_processing/image_data/lena_small.jpg""", 0 )
    # every pixel of the grayscale image should be non-zero
assert canny_img.all()
A_ = canny.canny(UpperCAmelCase__ )
    # the Canny edge map should contain at least one edge pixel
assert canny_array.any()
def UpperCAmelCase__ ( ) -> Dict:
assert gg.gaussian_filter(UpperCAmelCase__, 5, sigma=0.9 ).all()
def UpperCAmelCase__ ( ) -> int:
# laplace diagonals
A_ = array([[0.25, 0.5, 0.25], [0.5, -3, 0.5], [0.25, 0.5, 0.25]] )
A_ = conv.img_convolve(UpperCAmelCase__, UpperCAmelCase__ ).astype(UpperCAmelCase__ )
assert res.any()
def UpperCAmelCase__ ( ) -> List[Any]:
assert med.median_filter(UpperCAmelCase__, 3 ).any()
def UpperCAmelCase__ ( ) -> List[Any]:
A_ , A_ = sob.sobel_filter(UpperCAmelCase__ )
assert grad.any() and theta.any()
def UpperCAmelCase__ ( ) -> List[str]:
A_ = sp.make_sepia(UpperCAmelCase__, 20 )
assert sepia.all()
def UpperCAmelCase__ ( UpperCAmelCase__ = "digital_image_processing/image_data/lena_small.jpg" ) -> List[Any]:
A_ = bs.Burkes(imread(UpperCAmelCase__, 1 ), 1_20 )
burkes.process()
assert burkes.output_img.any()
def UpperCAmelCase__ ( UpperCAmelCase__ = "digital_image_processing/image_data/lena_small.jpg", ) -> Optional[int]:
A_ = rs.NearestNeighbour(imread(UpperCAmelCase__, 1 ), 4_00, 2_00 )
nn.process()
assert nn.output.any()
def UpperCAmelCase__ ( ) -> Optional[int]:
A_ = """digital_image_processing/image_data/lena.jpg"""
# Reading the image and converting it to grayscale.
A_ = imread(UpperCAmelCase__, 0 )
# Test for get_neighbors_pixel function() return not None
A_ = 0
A_ = 0
A_ = image[x_coordinate][y_coordinate]
A_ = lbp.get_neighbors_pixel(
UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__ )
assert neighbors_pixels is not None
# Test for local_binary_pattern function()
# Create a numpy array as the same height and width of read image
A_ = np.zeros((image.shape[0], image.shape[1]) )
# Iterating through the image and calculating the local binary pattern value
# for each pixel.
for i in range(0, image.shape[0] ):
for j in range(0, image.shape[1] ):
A_ = lbp.local_binary_value(UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__ )
assert lbp_image.any()
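# Usage note (illustrative, not in the original file): these are plain
# assert-style tests, so an invocation such as the following should run them,
# assuming the TheAlgorithms repository layout the relative image paths expect:
#   python -m pytest digital_image_processing/test_digital_image_processing.py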
| 667 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
_import_structure = {"configuration_speech_encoder_decoder": ["SpeechEncoderDecoderConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_speech_encoder_decoder"] = ["SpeechEncoderDecoderModel"]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_speech_encoder_decoder"] = ["FlaxSpeechEncoderDecoderModel"]
if TYPE_CHECKING:
from .configuration_speech_encoder_decoder import SpeechEncoderDecoderConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_speech_encoder_decoder import SpeechEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_speech_encoder_decoder import FlaxSpeechEncoderDecoderModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
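# Illustrative effect (not part of the original file): after the _LazyModule
# swap above, `from transformers.models.speech_encoder_decoder import
# SpeechEncoderDecoderModel` defers the heavy torch import until the attribute
# is first resolved, instead of paying for it at package import time.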
| 667 |
'''simple docstring'''
def manhattan_distance(point_a: list, point_b: list) -> float:
    _validate_point(point_a)
    _validate_point(point_b)
    if len(point_a) != len(point_b):
        raise ValueError("Both points must be in the same n-dimensional space")
    return float(sum(abs(a - b) for a, b in zip(point_a, point_b)))


def _validate_point(point: list[float]) -> None:
    if point:
        if isinstance(point, list):
            for item in point:
                if not isinstance(item, (int, float)):
                    msg = (
                        "Expected a list of numbers as input, found "
                        f"{type(item).__name__}"
                    )
                    raise TypeError(msg)
        else:
            msg = f"Expected a list of numbers as input, found {type(point).__name__}"
            raise TypeError(msg)
    else:
        raise ValueError("Missing an input")


def manhattan_distance_one_liner(point_a: list, point_b: list) -> float:
    _validate_point(point_a)
    _validate_point(point_b)
    if len(point_a) != len(point_b):
        raise ValueError("Both points must be in the same n-dimensional space")
    return float(sum(abs(x - y) for x, y in zip(point_a, point_b)))
if __name__ == "__main__":
import doctest
doctest.testmod()
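    # Illustrative usage (not in the original): both implementations agree,
    # e.g. the points (1, 1) and (9, 9) are |1-9| + |1-9| = 16.0 apart.
    print(manhattan_distance([1, 1], [9, 9]))  # 16.0
    print(manhattan_distance_one_liner([1, 1], [9, 9]))  # 16.0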
| 667 | 1 |
'''simple docstring'''
import argparse
import datetime
def zeller(date_input: str) -> str:
    days = {
        "0": "Sunday",
        "1": "Monday",
        "2": "Tuesday",
        "3": "Wednesday",
        "4": "Thursday",
        "5": "Friday",
        "6": "Saturday",
    }
    convert_datetime_days = {0: 1, 1: 2, 2: 3, 3: 4, 4: 5, 5: 6, 6: 0}
    # Validate
    if not 0 < len(date_input) < 11:
        raise ValueError("Must be 10 characters long")
    # Get month
    m = int(date_input[0] + date_input[1])
    # Validate
    if not 0 < m < 13:
        raise ValueError("Month must be between 1 - 12")
    sep_1 = date_input[2]
    # Validate
    if sep_1 not in ["-", "/"]:
        raise ValueError("Date separator must be '-' or '/'")
    # Get day
    d = int(date_input[3] + date_input[4])
    # Validate
    if not 0 < d < 32:
        raise ValueError("Date must be between 1 - 31")
    # Get second separator
    sep_2 = date_input[5]
    # Validate
    if sep_2 not in ["-", "/"]:
        raise ValueError("Date separator must be '-' or '/'")
    # Get year
    y = int(date_input[6] + date_input[7] + date_input[8] + date_input[9])
    # Arbitrary year range
    if not 45 < y < 8500:
        raise ValueError(
            "Year out of range. There has to be some sort of limit...right?"
        )
    # Get datetime obj for validation
    dt_ck = datetime.date(int(y), int(m), int(d))
    # Start math
    if m <= 2:
        y = y - 1
        m = m + 12
    # maths var
    c = int(str(y)[:2])
    k = int(str(y)[2:])
    t = int(2.6 * m - 5.39)
    u = int(c / 4)
    v = int(k / 4)
    x = int(d + k)
    z = int(t + u + v + x)
    w = int(z - (2 * c))
    f = round(w % 7)
    # End math
    # Validate math
    if f != convert_datetime_days[dt_ck.weekday()]:
        raise AssertionError("The date was evaluated incorrectly. Contact developer.")
    # Response
    response = f"Your date {date_input}, is a {days[str(f)]}!"
    return response
if __name__ == "__main__":
import doctest
doctest.testmod()
parser = argparse.ArgumentParser(
description=(
'''Find out what day of the week nearly any date is or was. Enter '''
'''date as a string in the mm-dd-yyyy or mm/dd/yyyy format'''
)
)
parser.add_argument(
'''date_input''', type=str, help='''Date as a string (mm-dd-yyyy or mm/dd/yyyy)'''
)
args = parser.parse_args()
zeller(args.date_input)
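# Worked example (illustrative, not in the original): for "01-31-2010",
# January is treated as month 13 of 2009, so c = 20, k = 9,
# t = int(2.6 * 13 - 5.39) = 28, u = 5, v = 2, x = 31 + 9 = 40,
# z = 28 + 5 + 2 + 40 = 75, w = 75 - 2 * 20 = 35 and f = 35 % 7 = 0 -> "Sunday".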
| 667 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_beit import BeitImageProcessor
__lowerCamelCase = logging.get_logger(__name__)
class BeitFeatureExtractor(BeitImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class BeitFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use BeitImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 667 | 1 |
'''simple docstring'''
import os
from glob import glob
import imageio
import torch
import torchvision
import wandb
from img_processing import custom_to_pil, loop_post_process, preprocess, preprocess_vqgan
from loaders import load_vqgan
from PIL import Image
from torch import nn
from transformers import CLIPModel, CLIPTokenizerFast
from utils import get_device, get_timestamp, show_pil
class ProcessorGradientFlow:
def __init__( self , UpperCamelCase__ = "cpu" , UpperCamelCase__ = "openai/clip-vit-large-patch14" ) -> None:
'''simple docstring'''
A_ = device
A_ = CLIPTokenizerFast.from_pretrained(UpperCamelCase__ )
A_ = [0.48145466, 0.4578275, 0.40821073]
A_ = [0.26862954, 0.26130258, 0.27577711]
A_ = torchvision.transforms.Normalize(self.image_mean , self.image_std )
A_ = torchvision.transforms.Resize(224 )
A_ = torchvision.transforms.CenterCrop(224 )
def snake_case_ ( self , UpperCamelCase__ ) -> List[str]:
'''simple docstring'''
A_ = self.resize(UpperCamelCase__ )
A_ = self.center_crop(UpperCamelCase__ )
A_ = self.normalize(UpperCamelCase__ )
return images
def __call__( self , UpperCamelCase__=None , UpperCamelCase__=None , **UpperCamelCase__ ) -> Dict:
'''simple docstring'''
A_ = self.tokenizer(text=UpperCamelCase__ , **UpperCamelCase__ )
A_ = self.preprocess_img(UpperCamelCase__ )
A_ = {key: value.to(self.device ) for (key, value) in encoding.items()}
return encoding
class VQGAN_CLIP(nn.Module):
def __init__( self , UpperCamelCase__=10 , UpperCamelCase__=0.01 , UpperCamelCase__=None , UpperCamelCase__=None , UpperCamelCase__=None , UpperCamelCase__=None , UpperCamelCase__=None , UpperCamelCase__=None , UpperCamelCase__=False , UpperCamelCase__=True , UpperCamelCase__="image" , UpperCamelCase__=True , UpperCamelCase__=False , UpperCamelCase__=False , UpperCamelCase__=False , ) -> None:
'''simple docstring'''
super().__init__()
A_ = None
A_ = device if device else get_device()
if vqgan:
A_ = vqgan
else:
A_ = load_vqgan(self.device , conf_path=UpperCamelCase__ , ckpt_path=UpperCamelCase__ )
self.vqgan.eval()
if clip:
A_ = clip
else:
A_ = CLIPModel.from_pretrained("""openai/clip-vit-base-patch32""" )
self.clip.to(self.device )
A_ = ProcessorGradientFlow(device=self.device )
A_ = iterations
A_ = lr
A_ = log
A_ = make_grid
A_ = return_val
A_ = quantize
A_ = self.vqgan.decoder.z_shape
def snake_case_ ( self , UpperCamelCase__=None , UpperCamelCase__=None , UpperCamelCase__=5 , UpperCamelCase__=True ) -> List[str]:
'''simple docstring'''
A_ = []
if output_path is None:
A_ = """./animation.gif"""
if input_path is None:
A_ = self.save_path
A_ = sorted(glob(input_path + """/*""" ) )
if not len(UpperCamelCase__ ):
raise ValueError(
"""No images found in save path, aborting (did you pass save_intermediate=True to the generate"""
""" function?)""" )
if len(UpperCamelCase__ ) == 1:
print("""Only one image found in save path, (did you pass save_intermediate=True to the generate function?)""" )
A_ = total_duration / len(UpperCamelCase__ )
A_ = [frame_duration] * len(UpperCamelCase__ )
if extend_frames:
A_ = 1.5
A_ = 3
for file_name in paths:
if file_name.endswith(""".png""" ):
images.append(imageio.imread(UpperCamelCase__ ) )
imageio.mimsave(UpperCamelCase__ , UpperCamelCase__ , duration=UpperCamelCase__ )
print(f'''gif saved to {output_path}''' )
def snake_case_ ( self , UpperCamelCase__=None , UpperCamelCase__=None ) -> Optional[int]:
'''simple docstring'''
if not (path or img):
raise ValueError("""Input either path or tensor""" )
if img is not None:
raise NotImplementedError
A_ = preprocess(Image.open(UpperCamelCase__ ) , target_image_size=256 ).to(self.device )
A_ = preprocess_vqgan(UpperCamelCase__ )
A_ , *A_ = self.vqgan.encode(UpperCamelCase__ )
return z
def snake_case_ ( self , UpperCamelCase__ ) -> Any:
'''simple docstring'''
A_ = self.latent.detach().requires_grad_()
A_ = base_latent + transform_vector
if self.quantize:
A_ , *A_ = self.vqgan.quantize(UpperCamelCase__ )
else:
A_ = trans_latent
return self.vqgan.decode(UpperCamelCase__ )
def snake_case_ ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__=None ) -> Union[str, Any]:
'''simple docstring'''
A_ = self.clip_preprocessor(text=UpperCamelCase__ , images=UpperCamelCase__ , return_tensors="""pt""" , padding=UpperCamelCase__ )
A_ = self.clip(**UpperCamelCase__ )
A_ = clip_outputs.logits_per_image
if weights is not None:
A_ = similarity_logits * weights
return similarity_logits.sum()
def snake_case_ ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) -> Tuple:
'''simple docstring'''
A_ = self._get_clip_similarity(pos_prompts["""prompts"""] , UpperCamelCase__ , weights=(1 / pos_prompts["""weights"""]) )
if neg_prompts:
A_ = self._get_clip_similarity(neg_prompts["""prompts"""] , UpperCamelCase__ , weights=neg_prompts["""weights"""] )
else:
A_ = torch.tensor([1] , device=self.device )
A_ = -torch.log(UpperCamelCase__ ) + torch.log(UpperCamelCase__ )
return loss
def snake_case_ ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) -> int:
'''simple docstring'''
A_ = torch.randn_like(self.latent , requires_grad=UpperCamelCase__ , device=self.device )
A_ = torch.optim.Adam([vector] , lr=self.lr )
for i in range(self.iterations ):
optim.zero_grad()
A_ = self._add_vector(UpperCamelCase__ )
A_ = loop_post_process(UpperCamelCase__ )
A_ = self._get_CLIP_loss(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
print("""CLIP loss""" , UpperCamelCase__ )
if self.log:
wandb.log({"""CLIP Loss""": clip_loss} )
clip_loss.backward(retain_graph=UpperCamelCase__ )
optim.step()
if self.return_val == "image":
yield custom_to_pil(transformed_img[0] )
else:
yield vector
def snake_case_ ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) -> Optional[int]:
'''simple docstring'''
wandb.init(reinit=UpperCamelCase__ , project="""face-editor""" )
wandb.config.update({"""Positive Prompts""": positive_prompts} )
wandb.config.update({"""Negative Prompts""": negative_prompts} )
wandb.config.update({"""lr""": self.lr, """iterations""": self.iterations} )
if image_path:
A_ = Image.open(UpperCamelCase__ )
A_ = image.resize((256, 256) )
wandb.log("""Original Image""" , wandb.Image(UpperCamelCase__ ) )
def snake_case_ ( self , UpperCamelCase__ ) -> Optional[Any]:
'''simple docstring'''
if not prompts:
return []
A_ = []
A_ = []
if isinstance(UpperCamelCase__ , UpperCamelCase__ ):
A_ = [prompt.strip() for prompt in prompts.split("""|""" )]
for prompt in prompts:
if isinstance(UpperCamelCase__ , (tuple, list) ):
A_ = prompt[0]
A_ = float(prompt[1] )
elif ":" in prompt:
A_ , A_ = prompt.split(""":""" )
A_ = float(UpperCamelCase__ )
else:
A_ = prompt
A_ = 1.0
processed_prompts.append(UpperCamelCase__ )
weights.append(UpperCamelCase__ )
return {
"prompts": processed_prompts,
"weights": torch.tensor(UpperCamelCase__ , device=self.device ),
}
def snake_case_ ( self , UpperCamelCase__ , UpperCamelCase__=None , UpperCamelCase__=None , UpperCamelCase__=True , UpperCamelCase__=False , UpperCamelCase__=True , UpperCamelCase__=True , UpperCamelCase__=None , ) -> Tuple:
'''simple docstring'''
if image_path:
A_ = self._get_latent(UpperCamelCase__ )
else:
A_ = torch.randn(self.latent_dim , device=self.device )
if self.log:
self._init_logging(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
assert pos_prompts, "You must provide at least one positive prompt."
A_ = self.process_prompts(UpperCamelCase__ )
A_ = self.process_prompts(UpperCamelCase__ )
if save_final and save_path is None:
A_ = os.path.join("""./outputs/""" , """_""".join(pos_prompts["""prompts"""] ) )
if not os.path.exists(UpperCamelCase__ ):
os.makedirs(UpperCamelCase__ )
else:
A_ = save_path + """_""" + get_timestamp()
os.makedirs(UpperCamelCase__ )
A_ = save_path
A_ = self.vqgan.decode(self.latent )[0]
if show_intermediate:
print("""Original Image""" )
show_pil(custom_to_pil(UpperCamelCase__ ) )
A_ = loop_post_process(UpperCamelCase__ )
for iter, transformed_img in enumerate(self._optimize_CLIP(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) ):
if show_intermediate:
show_pil(UpperCamelCase__ )
if save_intermediate:
transformed_img.save(os.path.join(self.save_path , f'''iter_{iter:03d}.png''' ) )
if self.log:
wandb.log({"""Image""": wandb.Image(UpperCamelCase__ )} )
if show_final:
show_pil(UpperCamelCase__ )
if save_final:
transformed_img.save(os.path.join(self.save_path , f'''iter_{iter:03d}_final.png''' ) )
| 667 |
'''simple docstring'''
def is_palindrome(num: int) -> bool:
    if num < 0:
        return False
    num_copy = num
    rev_num = 0
    while num > 0:
        rev_num = rev_num * 10 + (num % 10)
        num //= 10
    return num_copy == rev_num
if __name__ == "__main__":
import doctest
doctest.testmod()
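    # Illustrative checks (not in the original): reversing 121 rebuilds 121,
    # while 10 reverses to 1 and negatives are rejected outright.
    print(is_palindrome(121))  # True
    print(is_palindrome(10))  # False
    print(is_palindrome(-11))  # False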
| 667 | 1 |
'''simple docstring'''
from __future__ import annotations
import math
def is_prime(number: int) -> bool:
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False
    # All primes number are in format of 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


odd_composites = [num for num in range(3, 10_0001, 2) if not is_prime(num)]


def compute_nums(n: int) -> list[int]:
    if not isinstance(n, int):
        raise ValueError("n must be an integer")
    if n <= 0:
        raise ValueError("n must be >= 0")
    list_nums = []
    for num in range(len(odd_composites)):
        i = 0
        while 2 * i * i <= odd_composites[num]:
            rem = odd_composites[num] - 2 * i * i
            if is_prime(rem):
                break
            i += 1
        else:
            list_nums.append(odd_composites[num])
        if len(list_nums) == n:
            return list_nums
    return []


def solution() -> int:
    return compute_nums(1)[0]
if __name__ == "__main__":
print(f"""{solution() = }""")
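    # Illustrative cross-check (not in the original): 5777 is the classic
    # counterexample to the conjecture, so no i with 2*i*i <= 5777 leaves a
    # prime remainder 5777 - 2*i*i.
    assert not any(is_prime(5777 - 2 * i * i) for i in range(1, 54))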
| 667 |
'''simple docstring'''
ks = range(2, 20 + 1)
base = [10**k for k in range(ks[-1] + 1)]
memo = {}
def next_term(a_i, k, i, n):
    # a(i) = b * 10^k + c; ds_b is digitsum(b), c the value of the low k digits
    ds_b = sum(a_i[j] for j in range(k, len(a_i)))
    c = sum(a_i[j] * base[j] for j in range(min(len(a_i), k)))
    diff, dn = 0, 0
    max_dn = n - i
    sub_memo = memo.get(ds_b)
    if sub_memo is not None:
        jumps = sub_memo.get(c)
        if jumps is not None and len(jumps) > 0:
            # find and make the largest jump without going over
            max_jump = -1
            for _k in range(len(jumps) - 1, -1, -1):
                if jumps[_k][2] <= k and jumps[_k][1] <= max_dn:
                    max_jump = _k
                    break
            if max_jump >= 0:
                diff, dn, _kk = jumps[max_jump]
                # since the difference between jumps is cached, add c
                new_c = diff + c
                for j in range(min(k, len(a_i))):
                    new_c, a_i[j] = divmod(new_c, 10)
                if new_c > 0:
                    add(a_i, k, new_c)
        else:
            sub_memo[c] = []
    else:
        sub_memo = {c: []}
        memo[ds_b] = sub_memo
    if dn >= max_dn or c + diff >= base[k]:
        return diff, dn
    if k > ks[0]:
        while True:
            # keep doing smaller jumps
            _diff, terms_jumped = next_term(a_i, k - 1, i + dn, n)
            diff += _diff
            dn += terms_jumped
            if dn >= max_dn or c + diff >= base[k]:
                break
    else:
        # would be too small a jump, just compute sequential terms instead
        _diff, terms_jumped = compute(a_i, k, i + dn, n)
        diff += _diff
        dn += terms_jumped
    jumps = sub_memo[c]
    # keep jumps sorted by # of terms skipped
    j = 0
    while j < len(jumps):
        if jumps[j][1] > dn:
            break
        j += 1
    # cache the jump for this value digitsum(b) and c
    sub_memo[c].insert(j, (diff, dn, k))
    return (diff, dn)
def compute(a_i, k, i, n):
    if i >= n:
        return 0, i
    if k > len(a_i):
        a_i.extend([0 for _ in range(k - len(a_i))])
    # note: a_i -> b * 10^k + c
    # ds_b -> digitsum(b)
    # ds_c -> digitsum(c)
    start_i = i
    ds_b, ds_c, diff = 0, 0, 0
    for j in range(len(a_i)):
        if j >= k:
            ds_b += a_i[j]
        else:
            ds_c += a_i[j]
    while i < n:
        i += 1
        addend = ds_c + ds_b
        diff += addend
        ds_c = 0
        for j in range(k):
            s = a_i[j] + addend
            addend, a_i[j] = divmod(s, 10)
            ds_c += a_i[j]
        if addend > 0:
            break
    if addend > 0:
        add(a_i, k, addend)
    return diff, i - start_i
def add(digits, k, addend):
    for j in range(k, len(digits)):
        s = digits[j] + addend
        if s >= 10:
            quotient, digits[j] = divmod(s, 10)
            addend = addend // 10 + quotient
        else:
            digits[j] = s
            addend = addend // 10
        if addend == 0:
            break
    while addend > 0:
        addend, digit = divmod(addend, 10)
        digits.append(digit)
def solution(n: int = 10**15) -> int:
    digits = [1]
    i = 1
    dn = 0
    while True:
        diff, terms_jumped = next_term(digits, 20, i + dn, n)
        dn += terms_jumped
        if dn == n - i:
            break
    a_n = 0
    for j in range(len(digits)):
        a_n += digits[j] * 10**j
    return a_n
if __name__ == "__main__":
print(f"""{solution() = }""")
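    # Brute-force reference for small n (illustrative, not in the original):
    # the sequence is a(1) = 1, a(n+1) = a(n) + digitsum(a(n)), so a(10) = 62.
    def _brute_force(n: int) -> int:
        term = 1
        for _ in range(n - 1):
            term += sum(int(ch) for ch in str(term))
        return term

    assert _brute_force(10) == solution(10) == 62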
| 667 | 1 |
'''simple docstring'''
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import (
AutoProcessor,
BertTokenizerFast,
BlipImageProcessor,
    GPT2Tokenizer,
InstructBlipProcessor,
PreTrainedTokenizerFast,
)
@require_vision
class InstructBlipProcessorTest(unittest.TestCase):
def snake_case_ ( self ) -> Union[str, Any]:
'''simple docstring'''
A_ = tempfile.mkdtemp()
A_ = BlipImageProcessor()
        A_ = GPT2Tokenizer.from_pretrained("""hf-internal-testing/tiny-random-GPT2Model""" )
A_ = BertTokenizerFast.from_pretrained("""hf-internal-testing/tiny-random-bert""" )
A_ = InstructBlipProcessor(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
processor.save_pretrained(self.tmpdirname )
def snake_case_ ( self , **UpperCamelCase__ ) -> Optional[Any]:
'''simple docstring'''
return AutoProcessor.from_pretrained(self.tmpdirname , **UpperCamelCase__ ).tokenizer
def snake_case_ ( self , **UpperCamelCase__ ) -> List[Any]:
'''simple docstring'''
return AutoProcessor.from_pretrained(self.tmpdirname , **UpperCamelCase__ ).image_processor
def snake_case_ ( self , **UpperCamelCase__ ) -> Any:
'''simple docstring'''
return AutoProcessor.from_pretrained(self.tmpdirname , **UpperCamelCase__ ).qformer_tokenizer
def snake_case_ ( self ) -> Optional[Any]:
'''simple docstring'''
shutil.rmtree(self.tmpdirname )
def snake_case_ ( self ) -> Optional[int]:
'''simple docstring'''
        A_ = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uint8 )]
A_ = [Image.fromarray(np.moveaxis(UpperCamelCase__ , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def snake_case_ ( self ) -> str:
'''simple docstring'''
A_ = InstructBlipProcessor(
tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() , qformer_tokenizer=self.get_qformer_tokenizer() , )
processor.save_pretrained(self.tmpdirname )
A_ = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" )
A_ = self.get_image_processor(do_normalize=UpperCamelCase__ , padding_value=1.0 )
A_ = InstructBlipProcessor.from_pretrained(
self.tmpdirname , bos_token="""(BOS)""" , eos_token="""(EOS)""" , do_normalize=UpperCamelCase__ , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , UpperCamelCase__ )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , UpperCamelCase__ )
self.assertIsInstance(processor.qformer_tokenizer , UpperCamelCase__ )
def snake_case_ ( self ) -> List[str]:
'''simple docstring'''
A_ = self.get_image_processor()
A_ = self.get_tokenizer()
A_ = self.get_qformer_tokenizer()
A_ = InstructBlipProcessor(
tokenizer=UpperCamelCase__ , image_processor=UpperCamelCase__ , qformer_tokenizer=UpperCamelCase__ )
A_ = self.prepare_image_inputs()
A_ = image_processor(UpperCamelCase__ , return_tensors="""np""" )
A_ = processor(images=UpperCamelCase__ , return_tensors="""np""" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
def snake_case_ ( self ) -> Union[str, Any]:
'''simple docstring'''
A_ = self.get_image_processor()
A_ = self.get_tokenizer()
A_ = self.get_qformer_tokenizer()
A_ = InstructBlipProcessor(
tokenizer=UpperCamelCase__ , image_processor=UpperCamelCase__ , qformer_tokenizer=UpperCamelCase__ )
A_ = """lower newer"""
A_ = processor(text=UpperCamelCase__ )
A_ = tokenizer(UpperCamelCase__ , return_token_type_ids=UpperCamelCase__ )
A_ = qformer_tokenizer(UpperCamelCase__ , return_token_type_ids=UpperCamelCase__ )
for key in encoded_tokens.keys():
self.assertListEqual(encoded_tokens[key] , encoded_processor[key] )
for key in encoded_tokens_qformer.keys():
self.assertListEqual(encoded_tokens_qformer[key] , encoded_processor["""qformer_""" + key] )
def snake_case_ ( self ) -> Tuple:
'''simple docstring'''
A_ = self.get_image_processor()
A_ = self.get_tokenizer()
A_ = self.get_qformer_tokenizer()
A_ = InstructBlipProcessor(
tokenizer=UpperCamelCase__ , image_processor=UpperCamelCase__ , qformer_tokenizer=UpperCamelCase__ )
A_ = """lower newer"""
A_ = self.prepare_image_inputs()
A_ = processor(text=UpperCamelCase__ , images=UpperCamelCase__ )
self.assertListEqual(
list(inputs.keys() ) , ["""input_ids""", """attention_mask""", """qformer_input_ids""", """qformer_attention_mask""", """pixel_values"""] , )
# test if it raises when no input is passed
with pytest.raises(UpperCamelCase__ ):
processor()
def snake_case_ ( self ) -> Any:
'''simple docstring'''
A_ = self.get_image_processor()
A_ = self.get_tokenizer()
A_ = self.get_qformer_tokenizer()
A_ = InstructBlipProcessor(
tokenizer=UpperCamelCase__ , image_processor=UpperCamelCase__ , qformer_tokenizer=UpperCamelCase__ )
A_ = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
A_ = processor.batch_decode(UpperCamelCase__ )
A_ = tokenizer.batch_decode(UpperCamelCase__ )
self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ )
def snake_case_ ( self ) -> List[str]:
'''simple docstring'''
A_ = self.get_image_processor()
A_ = self.get_tokenizer()
A_ = self.get_qformer_tokenizer()
A_ = InstructBlipProcessor(
tokenizer=UpperCamelCase__ , image_processor=UpperCamelCase__ , qformer_tokenizer=UpperCamelCase__ )
A_ = """lower newer"""
A_ = self.prepare_image_inputs()
A_ = processor(text=UpperCamelCase__ , images=UpperCamelCase__ )
self.assertListEqual(
list(inputs.keys() ) , ["""input_ids""", """attention_mask""", """qformer_input_ids""", """qformer_attention_mask""", """pixel_values"""] , )
| 667 |
'''simple docstring'''
import tensorflow as tf
from ...tf_utils import shape_list
class TFAdaptiveSoftmaxMask(tf.keras.layers.Layer):
def __init__( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__=1 , UpperCamelCase__=False , **UpperCamelCase__ ) -> Union[str, Any]:
'''simple docstring'''
super().__init__(**UpperCamelCase__ )
A_ = vocab_size
A_ = d_embed
A_ = d_proj
A_ = cutoffs + [vocab_size]
A_ = [0] + self.cutoffs
A_ = div_val
A_ = self.cutoffs[0]
A_ = len(self.cutoffs ) - 1
A_ = self.shortlist_size + self.n_clusters
A_ = keep_order
A_ = []
A_ = []
def snake_case_ ( self , UpperCamelCase__ ) -> Union[str, Any]:
'''simple docstring'''
if self.n_clusters > 0:
A_ = self.add_weight(
shape=(self.n_clusters, self.d_embed) , initializer="""zeros""" , trainable=UpperCamelCase__ , name="""cluster_weight""" )
A_ = self.add_weight(
shape=(self.n_clusters,) , initializer="""zeros""" , trainable=UpperCamelCase__ , name="""cluster_bias""" )
if self.div_val == 1:
for i in range(len(self.cutoffs ) ):
if self.d_proj != self.d_embed:
A_ = self.add_weight(
shape=(self.d_embed, self.d_proj) , initializer="""zeros""" , trainable=UpperCamelCase__ , name=f'''out_projs_._{i}''' , )
self.out_projs.append(UpperCamelCase__ )
else:
self.out_projs.append(UpperCamelCase__ )
A_ = self.add_weight(
shape=(self.vocab_size, self.d_embed) , initializer="""zeros""" , trainable=UpperCamelCase__ , name=f'''out_layers_._{i}_._weight''' , )
A_ = self.add_weight(
shape=(self.vocab_size,) , initializer="""zeros""" , trainable=UpperCamelCase__ , name=f'''out_layers_._{i}_._bias''' , )
self.out_layers.append((weight, bias) )
else:
for i in range(len(self.cutoffs ) ):
A_ , A_ = self.cutoff_ends[i], self.cutoff_ends[i + 1]
A_ = self.d_embed // (self.div_val**i)
A_ = self.add_weight(
shape=(d_emb_i, self.d_proj) , initializer="""zeros""" , trainable=UpperCamelCase__ , name=f'''out_projs_._{i}''' )
self.out_projs.append(UpperCamelCase__ )
A_ = self.add_weight(
shape=(r_idx - l_idx, d_emb_i) , initializer="""zeros""" , trainable=UpperCamelCase__ , name=f'''out_layers_._{i}_._weight''' , )
A_ = self.add_weight(
shape=(r_idx - l_idx,) , initializer="""zeros""" , trainable=UpperCamelCase__ , name=f'''out_layers_._{i}_._bias''' , )
self.out_layers.append((weight, bias) )
super().build(UpperCamelCase__ )
@staticmethod
def snake_case_ ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__=None ) -> List[Any]:
'''simple docstring'''
A_ = x
if proj is not None:
A_ = tf.einsum("""ibd,ed->ibe""" , UpperCamelCase__ , UpperCamelCase__ )
return tf.einsum("""ibd,nd->ibn""" , UpperCamelCase__ , UpperCamelCase__ ) + b
@staticmethod
def snake_case_ ( UpperCamelCase__ , UpperCamelCase__ ) -> List[str]:
'''simple docstring'''
A_ = shape_list(UpperCamelCase__ )
A_ = tf.range(lp_size[0] , dtype=target.dtype )
A_ = tf.stack([r, target] , 1 )
return tf.gather_nd(UpperCamelCase__ , UpperCamelCase__ )
def snake_case_ ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__=True , UpperCamelCase__=False ) -> Optional[int]:
'''simple docstring'''
A_ = 0
if self.n_clusters == 0:
A_ = self._logit(UpperCamelCase__ , self.out_layers[0][0] , self.out_layers[0][1] , self.out_projs[0] )
if target is not None:
A_ = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=UpperCamelCase__ , logits=UpperCamelCase__ )
A_ = tf.nn.log_softmax(UpperCamelCase__ , axis=-1 )
else:
A_ = shape_list(UpperCamelCase__ )
A_ = []
A_ = tf.zeros(hidden_sizes[:2] )
for i in range(len(self.cutoffs ) ):
A_ , A_ = self.cutoff_ends[i], self.cutoff_ends[i + 1]
if target is not None:
A_ = (target >= l_idx) & (target < r_idx)
A_ = tf.where(UpperCamelCase__ )
A_ = tf.boolean_mask(UpperCamelCase__ , UpperCamelCase__ ) - l_idx
if self.div_val == 1:
A_ = self.out_layers[0][0][l_idx:r_idx]
A_ = self.out_layers[0][1][l_idx:r_idx]
else:
A_ = self.out_layers[i][0]
A_ = self.out_layers[i][1]
if i == 0:
A_ = tf.concat([cur_W, self.cluster_weight] , 0 )
A_ = tf.concat([cur_b, self.cluster_bias] , 0 )
A_ = self._logit(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , self.out_projs[0] )
A_ = tf.nn.log_softmax(UpperCamelCase__ )
out.append(head_logprob[..., : self.cutoffs[0]] )
if target is not None:
A_ = tf.boolean_mask(UpperCamelCase__ , UpperCamelCase__ )
A_ = self._gather_logprob(UpperCamelCase__ , UpperCamelCase__ )
else:
A_ = self._logit(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , self.out_projs[i] )
A_ = tf.nn.log_softmax(UpperCamelCase__ )
A_ = self.cutoffs[0] + i - 1 # No probability for the head cluster
A_ = head_logprob[..., cluster_prob_idx, None] + tail_logprob
out.append(UpperCamelCase__ )
if target is not None:
A_ = tf.boolean_mask(UpperCamelCase__ , UpperCamelCase__ )
A_ = tf.boolean_mask(UpperCamelCase__ , UpperCamelCase__ )
A_ = self._gather_logprob(UpperCamelCase__ , UpperCamelCase__ )
cur_logprob += cur_head_logprob[:, self.cutoff_ends[1] + i - 1]
if target is not None:
loss += tf.scatter_nd(UpperCamelCase__ , -cur_logprob , shape_list(UpperCamelCase__ ) )
A_ = tf.concat(UpperCamelCase__ , axis=-1 )
if target is not None:
if return_mean:
A_ = tf.reduce_mean(UpperCamelCase__ )
# Add the training-time loss value to the layer using `self.add_loss()`.
self.add_loss(UpperCamelCase__ )
# Log the loss as a metric (we could log arbitrary metrics,
# including different metrics for training and inference.
self.add_metric(UpperCamelCase__ , name=self.name , aggregation="""mean""" if return_mean else """""" )
return out
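# Shape sketch (illustrative, not part of the original layer): with
# vocab_size=10, cutoffs=[4] and div_val=1 there is 1 tail cluster, so the
# head scores the 4 shortlist tokens plus 1 cluster token (5 logits) and the
# single tail scores tokens 4..9 (6 logits); the two are combined through the
# log-softmax bookkeeping in the call above.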
| 667 | 1 |
'''simple docstring'''
import math
import sys
def minimum_squares_to_represent_a_number(number: int) -> int:
    if number != int(number):
        raise ValueError("the value of input must be a natural number")
    if number < 0:
        raise ValueError("the value of input must not be a negative number")
    if number == 0:
        return 1
    answers = [-1] * (number + 1)
    answers[0] = 0
    for i in range(1, number + 1):
        answer = sys.maxsize
        root = int(math.sqrt(i))
        for j in range(1, root + 1):
            current_answer = 1 + answers[i - (j**2)]
            answer = min(answer, current_answer)
        answers[i] = answer
    return answers[number]
if __name__ == "__main__":
import doctest
doctest.testmod()
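    # Illustrative checks (not in the original): 12 = 4 + 4 + 4 needs three
    # squares, while 13 = 4 + 9 needs only two (Lagrange guarantees at most 4).
    print(minimum_squares_to_represent_a_number(12))  # 3
    print(minimum_squares_to_represent_a_number(13))  # 2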
| 667 |
'''simple docstring'''
from queue import PriorityQueue
from typing import Any
import numpy as np
def pass_and_relaxation(
    graph,
    v,
    visited_forward,
    visited_backward,
    cst_fwd,
    cst_bwd,
    queue,
    parent,
    shortest_distance,
) -> float | int:
    for nxt, d in graph[v]:
        if nxt in visited_forward:
            continue
        old_cost_f = cst_fwd.get(nxt, np.inf)
        new_cost_f = cst_fwd[v] + d
        if new_cost_f < old_cost_f:
            queue.put((new_cost_f, nxt))
            cst_fwd[nxt] = new_cost_f
            parent[nxt] = v
        if nxt in visited_backward:
            if cst_fwd[v] + d + cst_bwd[nxt] < shortest_distance:
                shortest_distance = cst_fwd[v] + d + cst_bwd[nxt]
    return shortest_distance


def bidirectional_dij(source, destination, graph_forward, graph_backward) -> int:
    shortest_path_distance = -1
    visited_forward = set()
    visited_backward = set()
    cst_fwd = {source: 0}
    cst_bwd = {destination: 0}
    parent_forward = {source: None}
    parent_backward = {destination: None}
    queue_forward = PriorityQueue()
    queue_backward = PriorityQueue()
    shortest_distance = np.inf
    queue_forward.put((0, source))
    queue_backward.put((0, destination))
    if source == destination:
        return 0
    while not queue_forward.empty() and not queue_backward.empty():
        _, v_fwd = queue_forward.get()
        visited_forward.add(v_fwd)
        _, v_bwd = queue_backward.get()
        visited_backward.add(v_bwd)
        shortest_distance = pass_and_relaxation(
            graph_forward,
            v_fwd,
            visited_forward,
            visited_backward,
            cst_fwd,
            cst_bwd,
            queue_forward,
            parent_forward,
            shortest_distance,
        )
        shortest_distance = pass_and_relaxation(
            graph_backward,
            v_bwd,
            visited_backward,
            visited_forward,
            cst_bwd,
            cst_fwd,
            queue_backward,
            parent_backward,
            shortest_distance,
        )
        if cst_fwd[v_fwd] + cst_bwd[v_bwd] >= shortest_distance:
            break
    if shortest_distance != np.inf:
        shortest_path_distance = shortest_distance
    return shortest_path_distance
graph_fwd = {
    "B": [["C", 1]],
    "C": [["D", 1]],
    "D": [["F", 1]],
    "E": [["B", 1], ["G", 2]],
    "F": [],
    "G": [["F", 1]],
}
graph_bwd = {
    "B": [["E", 1]],
    "C": [["B", 1]],
    "D": [["C", 1]],
    "F": [["D", 1], ["G", 1]],
    "E": [[None, np.inf]],
    "G": [["E", 2]],
}
if __name__ == "__main__":
import doctest
doctest.testmod()
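    # Illustrative run on the module-level graphs (not in the original):
    # the best E -> F route is E -> G -> F with total weight 2 + 1 = 3.
    print(bidirectional_dij("E", "F", graph_fwd, graph_bwd))  # 3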
| 667 | 1 |
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class MobileViTImageProcessingTester(unittest.TestCase):
def __init__( self , UpperCamelCase__ , UpperCamelCase__=7 , UpperCamelCase__=3 , UpperCamelCase__=18 , UpperCamelCase__=30 , UpperCamelCase__=400 , UpperCamelCase__=True , UpperCamelCase__=None , UpperCamelCase__=True , UpperCamelCase__=None , UpperCamelCase__=True , ) -> Dict:
'''simple docstring'''
A_ = size if size is not None else {"""shortest_edge""": 20}
A_ = crop_size if crop_size is not None else {"""height""": 18, """width""": 18}
A_ = parent
A_ = batch_size
A_ = num_channels
A_ = image_size
A_ = min_resolution
A_ = max_resolution
A_ = do_resize
A_ = size
A_ = do_center_crop
A_ = crop_size
A_ = do_flip_channel_order
def snake_case_ ( self ) -> Union[str, Any]:
'''simple docstring'''
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
"do_flip_channel_order": self.do_flip_channel_order,
}
@require_torch
@require_vision
class MobileViTImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = MobileViTImageProcessor if is_vision_available() else None
def snake_case_ ( self ) -> Tuple:
'''simple docstring'''
A_ = MobileViTImageProcessingTester(self )
@property
def snake_case_ ( self ) -> Union[str, Any]:
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def snake_case_ ( self ) -> Tuple:
'''simple docstring'''
A_ = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(UpperCamelCase__ , """do_resize""" ) )
self.assertTrue(hasattr(UpperCamelCase__ , """size""" ) )
self.assertTrue(hasattr(UpperCamelCase__ , """do_center_crop""" ) )
self.assertTrue(hasattr(UpperCamelCase__ , """center_crop""" ) )
self.assertTrue(hasattr(UpperCamelCase__ , """do_flip_channel_order""" ) )
def snake_case_ ( self ) -> Tuple:
'''simple docstring'''
A_ = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"""shortest_edge""": 20} )
self.assertEqual(image_processor.crop_size , {"""height""": 18, """width""": 18} )
A_ = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 )
self.assertEqual(image_processor.size , {"""shortest_edge""": 42} )
self.assertEqual(image_processor.crop_size , {"""height""": 84, """width""": 84} )
def snake_case_ ( self ) -> List[Any]:
'''simple docstring'''
pass
def snake_case_ ( self ) -> List[str]:
'''simple docstring'''
# Initialize image_processing
A_ = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
A_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase__ )
for image in image_inputs:
self.assertIsInstance(UpperCamelCase__ , Image.Image )
# Test not batched input
A_ = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
# Test batched
A_ = image_processing(UpperCamelCase__ , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
def snake_case_ ( self ) -> List[str]:
'''simple docstring'''
# Initialize image_processing
A_ = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
A_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase__ , numpify=UpperCamelCase__ )
for image in image_inputs:
self.assertIsInstance(UpperCamelCase__ , np.ndarray )
# Test not batched input
A_ = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
# Test batched
A_ = image_processing(UpperCamelCase__ , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
def snake_case_ ( self ) -> Dict:
'''simple docstring'''
# Initialize image_processing
A_ = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
A_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase__ , torchify=UpperCamelCase__ )
for image in image_inputs:
self.assertIsInstance(UpperCamelCase__ , torch.Tensor )
# Test not batched input
A_ = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
# Test batched
A_ = image_processing(UpperCamelCase__ , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
| 667 |
'''simple docstring'''
import os
__lowerCamelCase = {'''I''': 1, '''V''': 5, '''X''': 10, '''L''': 50, '''C''': 100, '''D''': 500, '''M''': 1000}
def parse_roman_numerals(numerals: str) -> int:
    total_value = 0
    index = 0
    while index < len(numerals) - 1:
        current_value = SYMBOLS[numerals[index]]
        next_value = SYMBOLS[numerals[index + 1]]
        if current_value < next_value:
            total_value -= current_value
        else:
            total_value += current_value
        index += 1
    total_value += SYMBOLS[numerals[index]]
    return total_value


def generate_roman_numerals(num: int) -> str:
    numerals = ""
    m_count = num // 1000
    numerals += m_count * "M"
    num %= 1000
    c_count = num // 100
    if c_count == 9:
        numerals += "CM"
        c_count -= 9
    elif c_count == 4:
        numerals += "CD"
        c_count -= 4
    if c_count >= 5:
        numerals += "D"
        c_count -= 5
    numerals += c_count * "C"
    num %= 100
    x_count = num // 10
    if x_count == 9:
        numerals += "XC"
        x_count -= 9
    elif x_count == 4:
        numerals += "XL"
        x_count -= 4
    if x_count >= 5:
        numerals += "L"
        x_count -= 5
    numerals += x_count * "X"
    num %= 10
    if num == 9:
        numerals += "IX"
        num -= 9
    elif num == 4:
        numerals += "IV"
        num -= 4
    if num >= 5:
        numerals += "V"
        num -= 5
    numerals += num * "I"
    return numerals


def solution(roman_numerals_filename: str = "/p089_roman.txt") -> int:
    savings = 0
    with open(os.path.dirname(__file__) + roman_numerals_filename) as filea:
        lines = filea.readlines()
    for line in lines:
        original = line.strip()
        num = parse_roman_numerals(original)
        shortened = generate_roman_numerals(num)
        savings += len(original) - len(shortened)
    return savings
if __name__ == "__main__":
print(f"""{solution() = }""")
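    # Illustrative round-trip (not in the original): "XIIIIII" parses to 16,
    # whose minimal form is the three-character "XVI".
    print(parse_roman_numerals("XIIIIII"))  # 16
    print(generate_roman_numerals(16))  # XVI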
| 667 | 1 |
'''simple docstring'''
def lucas_lehmer_test(p: int) -> bool:
    if p < 2:
        raise ValueError("p should not be less than 2!")
    elif p == 2:
        return True
    s = 4
    m = (1 << p) - 1
    for _ in range(p - 2):
        s = ((s * s) - 2) % m
    return s == 0
if __name__ == "__main__":
print(lucas_lehmer_test(7))
print(lucas_lehmer_test(11))
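    # Illustrative scan (not in the original): collect exponents whose
    # Mersenne number 2**p - 1 is prime; 2**11 - 1 = 2047 = 23 * 89 fails.
    print([p for p in (3, 5, 7, 11, 13) if lucas_lehmer_test(p)])  # [3, 5, 7, 13]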
| 667 |
'''simple docstring'''
import warnings
from diffusers import StableDiffusionImgaImgPipeline # noqa F401
warnings.warn(
'''The `image_to_image.py` script is outdated. Please use directly `from diffusers import'''
''' StableDiffusionImg2ImgPipeline` instead.'''
)
| 667 | 1 |
'''simple docstring'''
from __future__ import annotations
from math import pow, sqrt
def electrical_impedance(
    resistance: float, reactance: float, impedance: float
) -> dict[str, float]:
    if (resistance, reactance, impedance).count(0) != 1:
        raise ValueError("One and only one argument must be 0")
    if resistance == 0:
        return {"resistance": sqrt(pow(impedance, 2) - pow(reactance, 2))}
    elif reactance == 0:
        return {"reactance": sqrt(pow(impedance, 2) - pow(resistance, 2))}
    elif impedance == 0:
        return {"impedance": sqrt(pow(resistance, 2) + pow(reactance, 2))}
    else:
        raise ValueError("Exactly one argument must be 0")
if __name__ == "__main__":
import doctest
doctest.testmod()
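    # Illustrative use (not in the original): a 3 ohm resistance with 4 ohm
    # reactance gives |Z| = sqrt(3**2 + 4**2) = 5 ohm.
    print(electrical_impedance(3, 4, 0))  # {'impedance': 5.0}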
| 667 |
'''simple docstring'''
import importlib.util
import os
import platform
from argparse import ArgumentParser
import huggingface_hub
from .. import __version__ as version
from ..utils import (
is_accelerate_available,
is_flax_available,
is_safetensors_available,
is_tf_available,
is_torch_available,
)
from . import BaseTransformersCLICommand
def UpperCAmelCase__ ( UpperCAmelCase__ ) -> Tuple:
return EnvironmentCommand()
def UpperCAmelCase__ ( UpperCAmelCase__ ) -> int:
return EnvironmentCommand(args.accelerate_config_file )
class EnvironmentCommand(BaseTransformersCLICommand):
@staticmethod
def snake_case_ ( UpperCamelCase__ ) -> Union[str, Any]:
'''simple docstring'''
A_ = parser.add_parser("""env""" )
download_parser.set_defaults(func=UpperCamelCase__ )
download_parser.add_argument(
"""--accelerate-config_file""" , default=UpperCamelCase__ , help="""The accelerate config file to use for the default values in the launching script.""" , )
download_parser.set_defaults(func=UpperCamelCase__ )
def __init__( self , UpperCamelCase__ , *UpperCamelCase__ ) -> None:
'''simple docstring'''
A_ = accelerate_config_file
def snake_case_ ( self ) -> List[str]:
'''simple docstring'''
A_ = """not installed"""
if is_safetensors_available():
import safetensors
A_ = safetensors.__version__
elif importlib.util.find_spec("""safetensors""" ) is not None:
import safetensors
A_ = f'''{safetensors.__version__} but is ignored because of PyTorch version too old.'''
A_ = """not installed"""
A_ = A_ = """not found"""
if is_accelerate_available():
import accelerate
from accelerate.commands.config import default_config_file, load_config_from_file
A_ = accelerate.__version__
# Get the default from the config file.
if self._accelerate_config_file is not None or os.path.isfile(UpperCamelCase__ ):
A_ = load_config_from_file(self._accelerate_config_file ).to_dict()
A_ = (
"""\n""".join([f'''\t- {prop}: {val}''' for prop, val in accelerate_config.items()] )
if isinstance(UpperCamelCase__ , UpperCamelCase__ )
else f'''\t{accelerate_config}'''
)
A_ = """not installed"""
A_ = """NA"""
if is_torch_available():
import torch
A_ = torch.__version__
A_ = torch.cuda.is_available()
A_ = """not installed"""
A_ = """NA"""
if is_tf_available():
import tensorflow as tf
A_ = tf.__version__
try:
# deprecated in v2.1
A_ = tf.test.is_gpu_available()
except AttributeError:
# returns list of devices, convert to bool
A_ = bool(tf.config.list_physical_devices("""GPU""" ) )
A_ = """not installed"""
A_ = """not installed"""
A_ = """not installed"""
A_ = """NA"""
if is_flax_available():
import flax
import jax
import jaxlib
A_ = flax.__version__
A_ = jax.__version__
A_ = jaxlib.__version__
A_ = jax.lib.xla_bridge.get_backend().platform
A_ = {
"""`transformers` version""": version,
"""Platform""": platform.platform(),
"""Python version""": platform.python_version(),
"""Huggingface_hub version""": huggingface_hub.__version__,
"""Safetensors version""": f'''{safetensors_version}''',
"""Accelerate version""": f'''{accelerate_version}''',
"""Accelerate config""": f'''{accelerate_config_str}''',
"""PyTorch version (GPU?)""": f'''{pt_version} ({pt_cuda_available})''',
"""Tensorflow version (GPU?)""": f'''{tf_version} ({tf_cuda_available})''',
"""Flax version (CPU?/GPU?/TPU?)""": f'''{flax_version} ({jax_backend})''',
"""Jax version""": f'''{jax_version}''',
"""JaxLib version""": f'''{jaxlib_version}''',
"""Using GPU in script?""": """<fill in>""",
"""Using distributed or parallel set-up in script?""": """<fill in>""",
}
print("""\nCopy-and-paste the text below in your GitHub issue and FILL OUT the two last points.\n""" )
print(self.format_dict(UpperCamelCase__ ) )
return info
@staticmethod
def snake_case_ ( UpperCamelCase__ ) -> List[str]:
'''simple docstring'''
return "\n".join([f'''- {prop}: {val}''' for prop, val in d.items()] ) + "\n"
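# Usage note (illustrative, not part of the original file): this command backs
# `transformers-cli env`; running it prints the version table assembled above
# so it can be copy-pasted into bug reports.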
| 667 | 1 |
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
__lowerCamelCase = logging.get_logger(__name__)
class PoolFormerImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]
def __init__( self , UpperCamelCase__ = True , UpperCamelCase__ = None , UpperCamelCase__ = 0.9 , UpperCamelCase__ = PILImageResampling.BICUBIC , UpperCamelCase__ = True , UpperCamelCase__ = None , UpperCamelCase__ = 1 / 255 , UpperCamelCase__ = True , UpperCamelCase__ = True , UpperCamelCase__ = None , UpperCamelCase__ = None , **UpperCamelCase__ , ) -> None:
'''simple docstring'''
super().__init__(**UpperCamelCase__ )
A_ = size if size is not None else {"""shortest_edge""": 224}
A_ = get_size_dict(UpperCamelCase__ , default_to_square=UpperCamelCase__ )
A_ = crop_size if crop_size is not None else {"""height""": 224, """width""": 224}
A_ = get_size_dict(UpperCamelCase__ , param_name="""crop_size""" )
A_ = do_resize
A_ = size
A_ = crop_pct
A_ = resample
A_ = do_center_crop
A_ = crop_size
A_ = do_rescale
A_ = rescale_factor
A_ = do_normalize
A_ = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
A_ = image_std if image_std is not None else IMAGENET_DEFAULT_STD
def snake_case_ ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = None , UpperCamelCase__ = PILImageResampling.BICUBIC , UpperCamelCase__ = None , **UpperCamelCase__ , ) -> np.ndarray:
'''simple docstring'''
A_ = get_size_dict(UpperCamelCase__ , default_to_square=UpperCamelCase__ )
if "shortest_edge" not in size and ("height" not in size or "width" not in size):
raise ValueError(f'''size must contain \'height\' and \'width\' or \'shortest_edge\' as keys. Got {size.keys()}''' )
if crop_pct is not None:
if "shortest_edge" in size:
A_ = int(size["""shortest_edge"""] / crop_pct )
elif "height" in size and "width" in size:
if size["height"] == size["width"]:
A_ = int(size["""height"""] / crop_pct )
else:
A_ = (int(size["""height"""] / crop_pct ), int(size["""width"""] / crop_pct ))
else:
raise ValueError("""Invalid size for resize: {}""".format(UpperCamelCase__ ) )
A_ = get_resize_output_image_size(UpperCamelCase__ , size=UpperCamelCase__ , default_to_square=UpperCamelCase__ )
else:
if "shortest_edge" in size:
A_ = get_resize_output_image_size(UpperCamelCase__ , size=size["""shortest_edge"""] , default_to_square=UpperCamelCase__ )
elif "height" in size and "width" in size:
A_ = (size["""height"""], size["""width"""])
else:
raise ValueError("""Invalid size for resize: {}""".format(UpperCamelCase__ ) )
return resize(UpperCamelCase__ , size=UpperCamelCase__ , resample=UpperCamelCase__ , data_format=UpperCamelCase__ , **UpperCamelCase__ )
def snake_case_ ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = None , **UpperCamelCase__ , ) -> np.ndarray:
'''simple docstring'''
A_ = get_size_dict(UpperCamelCase__ )
if "height" not in size or "width" not in size:
raise ValueError(f'''size must contain \'height\' and \'width\' as keys. Got {size.keys()}''' )
return center_crop(UpperCamelCase__ , size=(size["""height"""], size["""width"""]) , data_format=UpperCamelCase__ , **UpperCamelCase__ )
def snake_case_ ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = None , **UpperCamelCase__ , ) -> Tuple:
'''simple docstring'''
return rescale(UpperCamelCase__ , scale=UpperCamelCase__ , data_format=UpperCamelCase__ , **UpperCamelCase__ )
def snake_case_ ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = None , **UpperCamelCase__ , ) -> np.ndarray:
'''simple docstring'''
return normalize(UpperCamelCase__ , mean=UpperCamelCase__ , std=UpperCamelCase__ , data_format=UpperCamelCase__ , **UpperCamelCase__ )
def snake_case_ ( self , UpperCamelCase__ , UpperCamelCase__ = None , UpperCamelCase__ = None , UpperCamelCase__ = None , UpperCamelCase__ = None , UpperCamelCase__ = None , UpperCamelCase__ = None , UpperCamelCase__ = None , UpperCamelCase__ = None , UpperCamelCase__ = None , UpperCamelCase__ = None , UpperCamelCase__ = None , UpperCamelCase__ = None , UpperCamelCase__ = ChannelDimension.FIRST , **UpperCamelCase__ , ) -> PIL.Image.Image:
'''simple docstring'''
A_ = do_resize if do_resize is not None else self.do_resize
A_ = crop_pct if crop_pct is not None else self.crop_pct
A_ = resample if resample is not None else self.resample
A_ = do_center_crop if do_center_crop is not None else self.do_center_crop
A_ = do_rescale if do_rescale is not None else self.do_rescale
A_ = rescale_factor if rescale_factor is not None else self.rescale_factor
A_ = do_normalize if do_normalize is not None else self.do_normalize
A_ = image_mean if image_mean is not None else self.image_mean
A_ = image_std if image_std is not None else self.image_std
A_ = size if size is not None else self.size
A_ = get_size_dict(UpperCamelCase__ , default_to_square=UpperCamelCase__ )
A_ = crop_size if crop_size is not None else self.crop_size
A_ = get_size_dict(UpperCamelCase__ , param_name="""crop_size""" )
A_ = make_list_of_images(UpperCamelCase__ )
if not valid_images(UpperCamelCase__ ):
raise ValueError(
"""Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
"""torch.Tensor, tf.Tensor or jax.ndarray.""" )
        if do_resize and (size is None or resample is None):
raise ValueError("""Size and resample must be specified if do_resize is True.""" )
if do_center_crop and crop_pct is None:
raise ValueError("""Crop_pct must be specified if do_center_crop is True.""" )
if do_rescale and rescale_factor is None:
raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("""Image mean and std must be specified if do_normalize is True.""" )
# All transformations expect numpy arrays.
A_ = [to_numpy_array(UpperCamelCase__ ) for image in images]
if do_resize:
A_ = [self.resize(image=UpperCamelCase__ , size=UpperCamelCase__ , crop_pct=UpperCamelCase__ , resample=UpperCamelCase__ ) for image in images]
if do_center_crop:
A_ = [self.center_crop(image=UpperCamelCase__ , size=UpperCamelCase__ ) for image in images]
if do_rescale:
A_ = [self.rescale(image=UpperCamelCase__ , scale=UpperCamelCase__ ) for image in images]
if do_normalize:
A_ = [self.normalize(image=UpperCamelCase__ , mean=UpperCamelCase__ , std=UpperCamelCase__ ) for image in images]
A_ = [to_channel_dimension_format(UpperCamelCase__ , UpperCamelCase__ ) for image in images]
A_ = {"""pixel_values""": images}
return BatchFeature(data=UpperCamelCase__ , tensor_type=UpperCamelCase__ )
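# Worked example (illustrative, not part of the original file): with the
# defaults size={"shortest_edge": 224} and crop_pct=0.9, resize() first scales
# the short side of the image to int(224 / 0.9) = 248 pixels, and the center
# crop then trims the result to the 224 x 224 crop_size.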
| 667 |
'''simple docstring'''
import unittest
import numpy as np
import torch
from torch import nn
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModelWithProjection,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import KandinskyV22PriorPipeline, PriorTransformer, UnCLIPScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import enable_full_determinism, skip_mps
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class KandinskyV22PriorPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyV22PriorPipeline
lowercase = ["prompt"]
lowercase = ["prompt", "negative_prompt"]
lowercase = [
"num_images_per_prompt",
"generator",
"num_inference_steps",
"latents",
"negative_prompt",
"guidance_scale",
"output_type",
"return_dict",
]
lowercase = False
@property
def snake_case_ ( self ) -> Any:
'''simple docstring'''
return 32
@property
def snake_case_ ( self ) -> Optional[int]:
'''simple docstring'''
return 32
@property
def snake_case_ ( self ) -> Optional[Any]:
'''simple docstring'''
return self.time_input_dim
@property
def snake_case_ ( self ) -> str:
'''simple docstring'''
return self.time_input_dim * 4
@property
def snake_case_ ( self ) -> int:
'''simple docstring'''
return 100
@property
def snake_case_ ( self ) -> Optional[Any]:
'''simple docstring'''
A_ = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
return tokenizer
@property
def snake_case_ ( self ) -> Union[str, Any]:
'''simple docstring'''
torch.manual_seed(0 )
A_ = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
return CLIPTextModelWithProjection(UpperCamelCase__ )
@property
def snake_case_ ( self ) -> Tuple:
'''simple docstring'''
torch.manual_seed(0 )
A_ = {
"""num_attention_heads""": 2,
"""attention_head_dim""": 12,
"""embedding_dim""": self.text_embedder_hidden_size,
"""num_layers""": 1,
}
A_ = PriorTransformer(**UpperCamelCase__ )
# clip_std and clip_mean is initialized to be 0 so PriorTransformer.post_process_latents will always return 0 - set clip_std to be 1 so it won't return 0
A_ = nn.Parameter(torch.ones(model.clip_std.shape ) )
return model
@property
def snake_case_ ( self ) -> str:
'''simple docstring'''
torch.manual_seed(0 )
A_ = CLIPVisionConfig(
hidden_size=self.text_embedder_hidden_size , image_size=224 , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_channels=3 , num_hidden_layers=5 , patch_size=14 , )
A_ = CLIPVisionModelWithProjection(UpperCamelCase__ )
return model
@property
def snake_case_ ( self ) -> str:
'''simple docstring'''
A_ = CLIPImageProcessor(
crop_size=224 , do_center_crop=UpperCamelCase__ , do_normalize=UpperCamelCase__ , do_resize=UpperCamelCase__ , image_mean=[0.48145466, 0.4578275, 0.40821073] , image_std=[0.26862954, 0.26130258, 0.27577711] , resample=3 , size=224 , )
return image_processor
def snake_case_ ( self ) -> Union[str, Any]:
'''simple docstring'''
A_ = self.dummy_prior
A_ = self.dummy_image_encoder
A_ = self.dummy_text_encoder
A_ = self.dummy_tokenizer
A_ = self.dummy_image_processor
A_ = UnCLIPScheduler(
variance_type="""fixed_small_log""" , prediction_type="""sample""" , num_train_timesteps=1000 , clip_sample=UpperCamelCase__ , clip_sample_range=10.0 , )
A_ = {
"""prior""": prior,
"""image_encoder""": image_encoder,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""scheduler""": scheduler,
"""image_processor""": image_processor,
}
return components
def snake_case_ ( self , UpperCamelCase__ , UpperCamelCase__=0 ) -> Optional[int]:
'''simple docstring'''
if str(UpperCamelCase__ ).startswith("""mps""" ):
A_ = torch.manual_seed(UpperCamelCase__ )
else:
A_ = torch.Generator(device=UpperCamelCase__ ).manual_seed(UpperCamelCase__ )
A_ = {
"""prompt""": """horse""",
"""generator""": generator,
"""guidance_scale""": 4.0,
"""num_inference_steps""": 2,
"""output_type""": """np""",
}
return inputs
def snake_case_ ( self ) -> List[str]:
'''simple docstring'''
A_ = """cpu"""
A_ = self.get_dummy_components()
A_ = self.pipeline_class(**UpperCamelCase__ )
A_ = pipe.to(UpperCamelCase__ )
pipe.set_progress_bar_config(disable=UpperCamelCase__ )
A_ = pipe(**self.get_dummy_inputs(UpperCamelCase__ ) )
A_ = output.image_embeds
A_ = pipe(
**self.get_dummy_inputs(UpperCamelCase__ ) , return_dict=UpperCamelCase__ , )[0]
A_ = image[0, -10:]
A_ = image_from_tuple[0, -10:]
assert image.shape == (1, 32)
A_ = np.array(
[-0.0532, 1.7120, 0.3656, -1.0852, -0.8946, -1.1756, 0.4348, 0.2482, 0.5146, -0.1156] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
@skip_mps
def snake_case_ ( self ) -> int:
'''simple docstring'''
A_ = torch_device == """cpu"""
A_ = True
A_ = False
self._test_inference_batch_single_identical(
test_max_difference=UpperCamelCase__ , relax_max_difference=UpperCamelCase__ , test_mean_pixel_difference=UpperCamelCase__ , )
@skip_mps
def snake_case_ ( self ) -> Union[str, Any]:
'''simple docstring'''
A_ = torch_device == """cpu"""
A_ = False
self._test_attention_slicing_forward_pass(
test_max_difference=UpperCamelCase__ , test_mean_pixel_difference=UpperCamelCase__ , )
| 667 | 1 |
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
DDIMScheduler,
KandinskyVaaImgaImgPipeline,
KandinskyVaaPriorPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class A__ ( _snake_case , unittest.TestCase ):
lowercase = KandinskyVaaImgaImgPipeline
lowercase = ["image_embeds", "negative_image_embeds", "image"]
lowercase = [
"image_embeds",
"negative_image_embeds",
"image",
]
lowercase = [
"generator",
"height",
"width",
"strength",
"guidance_scale",
"num_inference_steps",
"return_dict",
"guidance_scale",
"num_images_per_prompt",
"output_type",
"return_dict",
]
lowercase = False
@property
def snake_case_ ( self ) -> List[str]:
'''simple docstring'''
return 32
@property
def snake_case_ ( self ) -> str:
'''simple docstring'''
return 32
@property
def snake_case_ ( self ) -> Tuple:
'''simple docstring'''
return self.time_input_dim
@property
def snake_case_ ( self ) -> Optional[int]:
'''simple docstring'''
return self.time_input_dim * 4
@property
def snake_case_ ( self ) -> Optional[Any]:
'''simple docstring'''
return 100
@property
def snake_case_ ( self ) -> List[Any]:
'''simple docstring'''
torch.manual_seed(0 )
A_ = {
"""in_channels""": 4,
            # Out channels is double the in channels because the model predicts both mean and variance
"""out_channels""": 8,
"""addition_embed_type""": """image""",
"""down_block_types""": ("""ResnetDownsampleBlock2D""", """SimpleCrossAttnDownBlock2D"""),
"""up_block_types""": ("""SimpleCrossAttnUpBlock2D""", """ResnetUpsampleBlock2D"""),
"""mid_block_type""": """UNetMidBlock2DSimpleCrossAttn""",
"""block_out_channels""": (self.block_out_channels_a, self.block_out_channels_a * 2),
"""layers_per_block""": 1,
"""encoder_hid_dim""": self.text_embedder_hidden_size,
"""encoder_hid_dim_type""": """image_proj""",
"""cross_attention_dim""": self.cross_attention_dim,
"""attention_head_dim""": 4,
"""resnet_time_scale_shift""": """scale_shift""",
"""class_embed_type""": None,
}
A_ = UNetaDConditionModel(**UpperCamelCase__ )
return model
@property
def snake_case_ ( self ) -> List[str]:
'''simple docstring'''
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def snake_case_ ( self ) -> Any:
'''simple docstring'''
torch.manual_seed(0 )
A_ = VQModel(**self.dummy_movq_kwargs )
return model
def snake_case_ ( self ) -> Any:
'''simple docstring'''
A_ = self.dummy_unet
A_ = self.dummy_movq
A_ = {
"""num_train_timesteps""": 1000,
"""beta_schedule""": """linear""",
"""beta_start""": 0.00085,
"""beta_end""": 0.012,
"""clip_sample""": False,
"""set_alpha_to_one""": False,
"""steps_offset""": 0,
"""prediction_type""": """epsilon""",
"""thresholding""": False,
}
A_ = DDIMScheduler(**UpperCamelCase__ )
A_ = {
"""unet""": unet,
"""scheduler""": scheduler,
"""movq""": movq,
}
return components
def snake_case_ ( self , UpperCamelCase__ , UpperCamelCase__=0 ) -> List[str]:
'''simple docstring'''
A_ = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(UpperCamelCase__ ) ).to(UpperCamelCase__ )
A_ = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to(
UpperCamelCase__ )
# create init_image
A_ = floats_tensor((1, 3, 64, 64) , rng=random.Random(UpperCamelCase__ ) ).to(UpperCamelCase__ )
A_ = image.cpu().permute(0 , 2 , 3 , 1 )[0]
A_ = Image.fromarray(np.uinta(UpperCamelCase__ ) ).convert("""RGB""" ).resize((256, 256) )
if str(UpperCamelCase__ ).startswith("""mps""" ):
A_ = torch.manual_seed(UpperCamelCase__ )
else:
A_ = torch.Generator(device=UpperCamelCase__ ).manual_seed(UpperCamelCase__ )
A_ = {
"""image""": init_image,
"""image_embeds""": image_embeds,
"""negative_image_embeds""": negative_image_embeds,
"""generator""": generator,
"""height""": 64,
"""width""": 64,
"""num_inference_steps""": 10,
"""guidance_scale""": 7.0,
"""strength""": 0.2,
"""output_type""": """np""",
}
return inputs
def snake_case_ ( self ) -> Optional[int]:
'''simple docstring'''
A_ = """cpu"""
A_ = self.get_dummy_components()
A_ = self.pipeline_class(**UpperCamelCase__ )
A_ = pipe.to(UpperCamelCase__ )
pipe.set_progress_bar_config(disable=UpperCamelCase__ )
A_ = pipe(**self.get_dummy_inputs(UpperCamelCase__ ) )
A_ = output.images
A_ = pipe(
**self.get_dummy_inputs(UpperCamelCase__ ) , return_dict=UpperCamelCase__ , )[0]
A_ = image[0, -3:, -3:, -1]
A_ = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
A_ = np.array(
[0.6199778, 0.63984406, 0.46145785, 0.62944984, 0.5622215, 0.47306132, 0.47441456, 0.4607606, 0.48719263] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
), f''' expected_slice {expected_slice}, but got {image_slice.flatten()}'''
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
), f''' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'''
@slow
@require_torch_gpu
class A__ ( unittest.TestCase ):
def snake_case_ ( self ) -> Tuple:
'''simple docstring'''
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def snake_case_ ( self ) -> Union[str, Any]:
'''simple docstring'''
A_ = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/kandinskyv22/kandinskyv22_img2img_frog.npy""" )
A_ = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/kandinsky/cat.png""" )
A_ = """A red cartoon frog, 4k"""
A_ = KandinskyVaaPriorPipeline.from_pretrained(
"""kandinsky-community/kandinsky-2-2-prior""" , torch_dtype=torch.floataa )
pipe_prior.to(UpperCamelCase__ )
A_ = KandinskyVaaImgaImgPipeline.from_pretrained(
"""kandinsky-community/kandinsky-2-2-decoder""" , torch_dtype=torch.floataa )
A_ = pipeline.to(UpperCamelCase__ )
pipeline.set_progress_bar_config(disable=UpperCamelCase__ )
A_ = torch.Generator(device="""cpu""" ).manual_seed(0 )
A_ , A_ = pipe_prior(
UpperCamelCase__ , generator=UpperCamelCase__ , num_inference_steps=5 , negative_prompt="""""" , ).to_tuple()
A_ = pipeline(
image=UpperCamelCase__ , image_embeds=UpperCamelCase__ , negative_image_embeds=UpperCamelCase__ , generator=UpperCamelCase__ , num_inference_steps=100 , height=768 , width=768 , strength=0.2 , output_type="""np""" , )
A_ = output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(UpperCamelCase__ , UpperCamelCase__ )
| 667 |
'''simple docstring'''
import tempfile
import torch
from diffusers import IPNDMScheduler
from .test_schedulers import SchedulerCommonTest
class A__ ( _snake_case ):
lowercase = (IPNDMScheduler,)
lowercase = (("num_inference_steps", 50),)
def snake_case_ ( self , **UpperCamelCase__ ) -> Optional[Any]:
'''simple docstring'''
A_ = {"""num_train_timesteps""": 1000}
config.update(**UpperCamelCase__ )
return config
def snake_case_ ( self , UpperCamelCase__=0 , **UpperCamelCase__ ) -> str:
'''simple docstring'''
A_ = dict(self.forward_default_kwargs )
A_ = kwargs.pop("""num_inference_steps""" , UpperCamelCase__ )
A_ = self.dummy_sample
A_ = 0.1 * sample
A_ = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
A_ = self.get_scheduler_config(**UpperCamelCase__ )
A_ = scheduler_class(**UpperCamelCase__ )
scheduler.set_timesteps(UpperCamelCase__ )
# copy over dummy past residuals
A_ = dummy_past_residuals[:]
if time_step is None:
A_ = scheduler.timesteps[len(scheduler.timesteps ) // 2]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(UpperCamelCase__ )
A_ = scheduler_class.from_pretrained(UpperCamelCase__ )
new_scheduler.set_timesteps(UpperCamelCase__ )
# copy over dummy past residuals
A_ = dummy_past_residuals[:]
A_ = scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__ ).prev_sample
A_ = new_scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
A_ = scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__ ).prev_sample
A_ = new_scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
def snake_case_ ( self ) -> Optional[int]:
'''simple docstring'''
pass
def snake_case_ ( self , UpperCamelCase__=0 , **UpperCamelCase__ ) -> str:
'''simple docstring'''
A_ = dict(self.forward_default_kwargs )
A_ = kwargs.pop("""num_inference_steps""" , UpperCamelCase__ )
A_ = self.dummy_sample
A_ = 0.1 * sample
A_ = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
A_ = self.get_scheduler_config()
A_ = scheduler_class(**UpperCamelCase__ )
scheduler.set_timesteps(UpperCamelCase__ )
# copy over dummy past residuals (must be after setting timesteps)
A_ = dummy_past_residuals[:]
if time_step is None:
A_ = scheduler.timesteps[len(scheduler.timesteps ) // 2]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(UpperCamelCase__ )
A_ = scheduler_class.from_pretrained(UpperCamelCase__ )
# copy over dummy past residuals
new_scheduler.set_timesteps(UpperCamelCase__ )
# copy over dummy past residual (must be after setting timesteps)
A_ = dummy_past_residuals[:]
A_ = scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__ ).prev_sample
A_ = new_scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
A_ = scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__ ).prev_sample
A_ = new_scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
def snake_case_ ( self , **UpperCamelCase__ ) -> Tuple:
'''simple docstring'''
A_ = self.scheduler_classes[0]
A_ = self.get_scheduler_config(**UpperCamelCase__ )
A_ = scheduler_class(**UpperCamelCase__ )
A_ = 10
A_ = self.dummy_model()
A_ = self.dummy_sample_deter
scheduler.set_timesteps(UpperCamelCase__ )
for i, t in enumerate(scheduler.timesteps ):
A_ = model(UpperCamelCase__ , UpperCamelCase__ )
A_ = scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ).prev_sample
for i, t in enumerate(scheduler.timesteps ):
A_ = model(UpperCamelCase__ , UpperCamelCase__ )
A_ = scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ).prev_sample
return sample
def snake_case_ ( self ) -> Union[str, Any]:
'''simple docstring'''
A_ = dict(self.forward_default_kwargs )
A_ = kwargs.pop("""num_inference_steps""" , UpperCamelCase__ )
for scheduler_class in self.scheduler_classes:
A_ = self.get_scheduler_config()
A_ = scheduler_class(**UpperCamelCase__ )
A_ = self.dummy_sample
A_ = 0.1 * sample
if num_inference_steps is not None and hasattr(UpperCamelCase__ , """set_timesteps""" ):
scheduler.set_timesteps(UpperCamelCase__ )
elif num_inference_steps is not None and not hasattr(UpperCamelCase__ , """set_timesteps""" ):
A_ = num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
A_ = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
A_ = dummy_past_residuals[:]
A_ = scheduler.timesteps[5]
A_ = scheduler.timesteps[6]
A_ = scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__ ).prev_sample
A_ = scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__ ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
A_ = scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__ ).prev_sample
A_ = scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__ ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
def snake_case_ ( self ) -> Any:
'''simple docstring'''
for timesteps in [100, 1000]:
self.check_over_configs(num_train_timesteps=UpperCamelCase__ , time_step=UpperCamelCase__ )
def snake_case_ ( self ) -> Any:
'''simple docstring'''
for t, num_inference_steps in zip([1, 5, 10] , [10, 50, 100] ):
self.check_over_forward(num_inference_steps=UpperCamelCase__ , time_step=UpperCamelCase__ )
def snake_case_ ( self ) -> Dict:
'''simple docstring'''
A_ = self.full_loop()
A_ = torch.mean(torch.abs(UpperCamelCase__ ) )
assert abs(result_mean.item() - 2540529 ) < 10
| 667 | 1 |
'''simple docstring'''
from typing import List
import jiwer
import jiwer.transforms as tr
from packaging import version
import datasets
from datasets.config import PY_VERSION
if PY_VERSION < version.parse('''3.8'''):
import importlib_metadata
else:
import importlib.metadata as importlib_metadata
__lowerCamelCase = ''''''
if version.parse(importlib_metadata.version('''jiwer''')) < version.parse('''2.3.0'''):
class A__ ( tr.AbstractTransform ):
def __init__( self , UpperCamelCase__ = " " ) -> int:
'''simple docstring'''
A_ = sentence_delimiter
def snake_case_ ( self , UpperCamelCase__ ) -> str:
'''simple docstring'''
return list(UpperCamelCase__ )
def snake_case_ ( self , UpperCamelCase__ ) -> Dict:
'''simple docstring'''
A_ = []
for sent_idx, sentence in enumerate(UpperCamelCase__ ):
chars.extend(self.process_string(UpperCamelCase__ ) )
if self.sentence_delimiter is not None and self.sentence_delimiter != "" and sent_idx < len(UpperCamelCase__ ) - 1:
chars.append(self.sentence_delimiter )
return chars
__lowerCamelCase = tr.Compose(
[tr.RemoveMultipleSpaces(), tr.Strip(), SentencesToListOfCharacters(SENTENCE_DELIMITER)]
)
else:
__lowerCamelCase = tr.Compose(
[
tr.RemoveMultipleSpaces(),
tr.Strip(),
tr.ReduceToSingleSentence(SENTENCE_DELIMITER),
tr.ReduceToListOfListOfChars(),
]
)
__lowerCamelCase = '''\
@inproceedings{inproceedings,
author = {Morris, Andrew and Maier, Viktoria and Green, Phil},
year = {2004},
month = {01},
pages = {},
title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}
}
'''
__lowerCamelCase = '''\
Character error rate (CER) is a common metric of the performance of an automatic speech recognition system.
CER is similar to Word Error Rate (WER), but operates on characters instead of words. Please refer to the docs of WER for further information.
Character error rate can be computed as:
CER = (S + D + I) / N = (S + D + I) / (S + D + C)
where
S is the number of substitutions,
D is the number of deletions,
I is the number of insertions,
C is the number of correct characters,
N is the number of characters in the reference (N=S+D+C).
CER\'s output is not always a number between 0 and 1, in particular when there is a high number of insertions. This value is often associated with the percentage of characters that were incorrectly predicted. The lower the value, the better the
performance of the ASR system with a CER of 0 being a perfect score.
'''
__lowerCamelCase = '''
Computes CER score of transcribed segments against references.
Args:
references: list of references for each speech input.
predictions: list of transcriptions to score.
concatenate_texts: Whether or not to concatenate sentences before evaluation, set to True for a more accurate result.
Returns:
(float): the character error rate
Examples:
>>> predictions = ["this is the prediction", "there is an other sample"]
>>> references = ["this is the reference", "there is another one"]
>>> cer = datasets.load_metric("cer")
>>> cer_score = cer.compute(predictions=predictions, references=references)
>>> print(cer_score)
0.34146341463414637
'''
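# Worked instance of the CER formula above (an illustrative sketch, not part of
# the metric): comparing reference "abcd" with prediction "abxde" gives S=1
# ("c" -> "x"), D=0, I=1 ("e"), C=3, so N = S + D + C = 4 and
# CER = (1 + 0 + 1) / 4 = 0.5.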
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class A__ ( datasets.Metric ):
def snake_case_ ( self ) -> Optional[Any]:
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Value("""string""" , id="""sequence""" ),
"""references""": datasets.Value("""string""" , id="""sequence""" ),
} ) , codebase_urls=["""https://github.com/jitsi/jiwer/"""] , reference_urls=[
"""https://en.wikipedia.org/wiki/Word_error_rate""",
"""https://sites.google.com/site/textdigitisation/qualitymeasures/computingerrorrates""",
] , )
def snake_case_ ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__=False ) -> Tuple:
'''simple docstring'''
if concatenate_texts:
return jiwer.compute_measures(
UpperCamelCase__ , UpperCamelCase__ , truth_transform=UpperCamelCase__ , hypothesis_transform=UpperCamelCase__ , )["wer"]
A_ = 0
A_ = 0
for prediction, reference in zip(UpperCamelCase__ , UpperCamelCase__ ):
A_ = jiwer.compute_measures(
UpperCamelCase__ , UpperCamelCase__ , truth_transform=UpperCamelCase__ , hypothesis_transform=UpperCamelCase__ , )
incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"]
total += measures["substitutions"] + measures["deletions"] + measures["hits"]
return incorrect / total
| 667 |
'''simple docstring'''
import argparse
import re
from flax.traverse_util import flatten_dict, unflatten_dict
from tax import checkpoints
from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration
from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model
from transformers.utils import logging
logging.set_verbosity_info()
# should not include what is already done by the `from_pt` argument
__lowerCamelCase = {
'''/attention/''': '''/0/SelfAttention/''',
'''/self_attention/''': '''/0/SelfAttention/''',
'''/encoder_decoder_attention/''': '''/1/EncDecAttention/''',
'''value''': '''v''',
'''query''': '''q''',
'''key''': '''k''',
'''out''': '''o''',
'''pre_self_attention_layer_norm''': '''0/layer_norm''',
'''pre_cross_attention_layer_norm''': '''1/layer_norm''',
'''pre_attention_layer_norm''': '''0/layer_norm''', # previously 1, but seems wrong
'''token_embedder''': '''shared''',
'''encoder_norm''': '''final_layer_norm''',
'''decoder_norm''': '''final_layer_norm''',
'''relpos_bias/rel_embedding''': '''block/0/layer/0/SelfAttention/relative_attention_bias/weight''',
'''router/router_weights/w/''': '''router/classifier/''',
'''roer/roer_weights/w/''': '''router/classifier/''',
'''logits_dense''': '''lm_head''',
}
def UpperCAmelCase__ ( UpperCAmelCase__ ) -> Tuple:
# 1. in HF T5, we have block.{x}.layer.{y}. which corresponds to layer.{x} in
# the original model
A_ = list(s_dict.keys() )
for key in keys:
A_ = r""".*/layers_(\d+)"""
A_ = key
if re.match(UpperCAmelCase__, UpperCAmelCase__ ):
A_ = re.sub(r"""layers_(\d+)""", r"""block/\1/layer""", UpperCAmelCase__ )
A_ = r"""(encoder|decoder)\/"""
if re.match(UpperCAmelCase__, UpperCAmelCase__ ):
A_ = re.match(UpperCAmelCase__, UpperCAmelCase__ ).groups()
if groups[0] == "encoder":
A_ = re.sub(r"""/mlp/""", r"""/1/mlp/""", UpperCAmelCase__ )
A_ = re.sub(r"""/pre_mlp_layer_norm/""", r"""/1/layer_norm/""", UpperCAmelCase__ )
elif groups[0] == "decoder":
A_ = re.sub(r"""/mlp/""", r"""/2/mlp/""", UpperCAmelCase__ )
A_ = re.sub(r"""/pre_mlp_layer_norm/""", r"""/2/layer_norm/""", UpperCAmelCase__ )
# 2. Convert other classic mappings
for old_key, temp_key in MOE_LAYER_NAME_MAPPING.items():
if old_key in new_key:
A_ = new_key.replace(UpperCAmelCase__, UpperCAmelCase__ )
print(F'''{key} -> {new_key}''' )
A_ = s_dict.pop(UpperCAmelCase__ )
if "encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict:
A_ = s_dict[
"""encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"""
].T
if "decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict:
A_ = s_dict[
"""decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"""
].T
# 3. Take extra care of the EXPERTS layer
for key in list(s_dict.keys() ):
if "expert" in key:
A_ = s_dict[key].shape[0]
A_ = s_dict[key]
for idx in range(UpperCAmelCase__ ):
                s_dict[key.replace("""expert/""" , F"experts/expert_{idx}/" )] = expert_weihts[idx]
                print(F'''{key} -> {key.replace("expert/" , F"experts/expert_{idx}/" )}''' )
s_dict.pop(UpperCAmelCase__ )
return s_dict
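# Sample mapping traced through the rules above (a sketch using a typical T5X
# parameter name, not one read from a checkpoint):
#   "encoder/layers_0/attention/key/kernel"
#       -> "encoder/block/0/layer/0/SelfAttention/k/kernel"
# The layer index is rewritten first, then MOE_LAYER_NAME_MAPPING renames the
# attention sub-module ("/attention/" -> "/0/SelfAttention/") and the "key"
# projection ("key" -> "k").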
__lowerCamelCase = {
'''NUM_ENCODER_LAYERS''': '''num_layers''',
'''NUM_DECODER_LAYERS''': '''num_decoder_layers''',
'''NUM_HEADS''': '''num_heads''',
'''HEAD_DIM''': '''d_kv''',
'''EMBED_DIM''': '''d_model''',
'''MLP_DIM''': '''d_ff''',
'''NUM_SELECTED_EXPERTS''': '''num_selected_experts''',
'''NUM_ENCODER_SPARSE_LAYERS''': '''num_sparse_encoder_layers''',
'''NUM_DECODER_SPARSE_LAYERS''': '''num_sparse_decoder_layers''',
'''dense.MlpBlock.activations''': '''feed_forward_proj''',
}
def UpperCAmelCase__ ( UpperCAmelCase__, UpperCAmelCase__ ) -> List[Any]:
    # Convert a Google-style gin config to the Hugging Face format
import regex as re
with open(UpperCAmelCase__, """r""" ) as f:
A_ = f.read()
A_ = re.findall(r"""(.*) = ([0-9.]*)""", UpperCAmelCase__ )
A_ = {}
for param, value in regex_match:
if param in GIN_TO_CONFIG_MAPPING and value != "":
A_ = float(UpperCAmelCase__ ) if """.""" in value else int(UpperCAmelCase__ )
A_ = re.findall(r"""(.*activations) = \(\'(.*)\',\)""", UpperCAmelCase__ )[0]
A_ = str(activation[1] )
A_ = num_experts
A_ = SwitchTransformersConfig(**UpperCAmelCase__ )
return config
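# Illustration of the conversion above (hypothetical gin snippet): a line such
# as "NUM_HEADS = 12" is captured by the (.*) = ([0-9.]*) pattern and, via
# GIN_TO_CONFIG_MAPPING, lands in the config as num_heads=12.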
def UpperCAmelCase__ ( UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__=None, UpperCAmelCase__="./", UpperCAmelCase__=8 ) -> List[str]:
# Initialise PyTorch model
print(F'''Loading flax weights from : {flax_checkpoint_path}''' )
A_ = checkpoints.load_tax_checkpoint(UpperCAmelCase__ )
if gin_file is not None:
A_ = convert_gin_to_config(UpperCAmelCase__, UpperCAmelCase__ )
else:
A_ = SwitchTransformersConfig.from_pretrained(UpperCAmelCase__ )
A_ = SwitchTransformersForConditionalGeneration(UpperCAmelCase__ )
A_ = flax_params["""target"""]
A_ = flatten_dict(UpperCAmelCase__, sep="""/""" )
A_ = rename_keys(UpperCAmelCase__ )
A_ = unflatten_dict(UpperCAmelCase__, sep="""/""" )
# Load the flax params in the PT model
load_flax_weights_in_pytorch_model(UpperCAmelCase__, UpperCAmelCase__ )
print(F'''Save PyTorch model to {pytorch_dump_path}''' )
pt_model.save_pretrained(UpperCAmelCase__ )
if __name__ == "__main__":
__lowerCamelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--switch_t5x_checkpoint_path''',
default=None,
type=str,
required=True,
help=(
'''The config json file corresponding to the pre-trained SwitchTransformers model. \nThis specifies the'''
''' model architecture. If not provided, a `gin_file` has to be provided.'''
),
)
parser.add_argument(
'''--gin_file''',
default=None,
type=str,
required=False,
help='''Path to the gin config file. If not provided, a `config_file` has to be passed ''',
)
parser.add_argument(
'''--config_name''', default=None, type=str, required=False, help='''Config name of SwitchTransformers model.'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output pytorch model.'''
)
parser.add_argument('''--num_experts''', default=8, type=int, required=False, help='''Number of experts''')
__lowerCamelCase = parser.parse_args()
convert_flax_checkpoint_to_pytorch(
        args.switch_t5x_checkpoint_path,
args.config_name,
args.gin_file,
args.pytorch_dump_folder_path,
args.num_experts,
)
| 667 | 1 |
'''simple docstring'''
import math
def UpperCAmelCase__ ( UpperCAmelCase__ ) -> bool:
A_ = math.loga(math.sqrt(4 * positive_integer + 1 ) / 2 + 1 / 2 )
return exponent == int(UpperCAmelCase__ )
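# Worked check of the predicate above: for positive_integer=12,
# sqrt(4*12 + 1)/2 + 1/2 = 7/2 + 1/2 = 4 and log2(4) = 2 is integral, so 12
# passes; for positive_integer=6 the value is 5/2 + 1/2 = 3 and
# log2(3) ~ 1.585, so 6 fails.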
def UpperCAmelCase__ ( UpperCAmelCase__ = 1 / 1_23_45 ) -> int:
A_ = 0
A_ = 0
A_ = 3
while True:
A_ = (integer**2 - 1) / 4
# if candidate is an integer, then there is a partition for k
if partition_candidate == int(UpperCAmelCase__ ):
A_ = int(UpperCAmelCase__ )
total_partitions += 1
if check_partition_perfect(UpperCAmelCase__ ):
perfect_partitions += 1
if perfect_partitions > 0:
if perfect_partitions / total_partitions < max_proportion:
return int(UpperCAmelCase__ )
integer += 1
if __name__ == "__main__":
print(f"""{solution() = }""")
| 667 |
'''simple docstring'''
def UpperCAmelCase__ ( UpperCAmelCase__ ) -> int:
assert (
isinstance(UpperCAmelCase__, UpperCAmelCase__ ) and number_of_steps > 0
), F'''number_of_steps needs to be positive integer, your input {number_of_steps}'''
if number_of_steps == 1:
return 1
A_ , A_ = 1, 1
for _ in range(number_of_steps - 1 ):
A_ , A_ = current + previous, current
return current
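# Trace of the loop above for number_of_steps=4 (a sketch of the classic
# climbing-stairs count): starting from (current, previous) = (1, 1), the three
# iterations give (2, 1), (3, 2), (5, 3), so the function returns 5 -- the
# Fibonacci-style number of ways to climb 4 steps taking 1 or 2 at a time.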
if __name__ == "__main__":
import doctest
doctest.testmod()
| 667 | 1 |
'''simple docstring'''
from dataclasses import asdict, dataclass
from typing import Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowerCamelCase = logging.get_logger(__name__)
# TODO Update this
__lowerCamelCase = {
'''facebook/esm-1b''': '''https://huggingface.co/facebook/esm-1b/resolve/main/config.json''',
# See all ESM models at https://huggingface.co/models?filter=esm
}
class A__ ( _snake_case ):
lowercase = "esm"
def __init__( self , UpperCamelCase__=None , UpperCamelCase__=None , UpperCamelCase__=None , UpperCamelCase__=768 , UpperCamelCase__=12 , UpperCamelCase__=12 , UpperCamelCase__=3072 , UpperCamelCase__=0.1 , UpperCamelCase__=0.1 , UpperCamelCase__=1026 , UpperCamelCase__=0.02 , UpperCamelCase__=1e-1_2 , UpperCamelCase__="absolute" , UpperCamelCase__=True , UpperCamelCase__=None , UpperCamelCase__=False , UpperCamelCase__=False , UpperCamelCase__=None , UpperCamelCase__=None , **UpperCamelCase__ , ) -> Any:
'''simple docstring'''
super().__init__(pad_token_id=UpperCamelCase__ , mask_token_id=UpperCamelCase__ , **UpperCamelCase__ )
A_ = vocab_size
A_ = hidden_size
A_ = num_hidden_layers
A_ = num_attention_heads
A_ = intermediate_size
A_ = hidden_dropout_prob
A_ = attention_probs_dropout_prob
A_ = max_position_embeddings
A_ = initializer_range
A_ = layer_norm_eps
A_ = position_embedding_type
A_ = use_cache
A_ = emb_layer_norm_before
A_ = token_dropout
A_ = is_folding_model
if is_folding_model:
if esmfold_config is None:
logger.info("""No esmfold_config supplied for folding model, using default values.""" )
A_ = EsmFoldConfig()
elif isinstance(UpperCamelCase__ , UpperCamelCase__ ):
A_ = EsmFoldConfig(**UpperCamelCase__ )
A_ = esmfold_config
if vocab_list is None:
logger.warning("""No vocab_list supplied for folding model, assuming the ESM-2 vocabulary!""" )
A_ = get_default_vocab_list()
else:
A_ = vocab_list
else:
A_ = None
A_ = None
if self.esmfold_config is not None and getattr(self.esmfold_config , """use_esm_attn_map""" , UpperCamelCase__ ):
raise ValueError("""The HuggingFace port of ESMFold does not support use_esm_attn_map at this time!""" )
def snake_case_ ( self ) -> Optional[Any]:
'''simple docstring'''
A_ = super().to_dict()
if isinstance(self.esmfold_config , UpperCamelCase__ ):
A_ = self.esmfold_config.to_dict()
return output
@dataclass
class A__ :
lowercase = None
lowercase = True
lowercase = False
lowercase = False
lowercase = False
lowercase = 0
lowercase = True
lowercase = False
lowercase = 128
lowercase = None
def snake_case_ ( self ) -> List[str]:
'''simple docstring'''
if self.trunk is None:
A_ = TrunkConfig()
elif isinstance(self.trunk , UpperCamelCase__ ):
A_ = TrunkConfig(**self.trunk )
def snake_case_ ( self ) -> Tuple:
'''simple docstring'''
A_ = asdict(self )
A_ = self.trunk.to_dict()
return output
@dataclass
class A__ :
lowercase = 48
lowercase = 1_024
lowercase = 128
lowercase = 32
lowercase = 32
lowercase = 32
lowercase = 0
lowercase = 0
lowercase = False
lowercase = 4
lowercase = 128
lowercase = None
def snake_case_ ( self ) -> Any:
'''simple docstring'''
if self.structure_module is None:
A_ = StructureModuleConfig()
elif isinstance(self.structure_module , UpperCamelCase__ ):
A_ = StructureModuleConfig(**self.structure_module )
if self.max_recycles <= 0:
raise ValueError(f'''`max_recycles` should be positive, got {self.max_recycles}.''' )
        if self.sequence_state_dim % self.sequence_head_width != 0:
            raise ValueError(
                """`sequence_state_dim` should be a round multiple of `sequence_head_width`, got"""
                f''' {self.sequence_state_dim} and {self.sequence_head_width}.''' )
        if self.pairwise_state_dim % self.pairwise_head_width != 0:
            raise ValueError(
                """`pairwise_state_dim` should be a round multiple of `pairwise_head_width`, got"""
                f''' {self.pairwise_state_dim} and {self.pairwise_head_width}.''' )
A_ = self.sequence_state_dim // self.sequence_head_width
A_ = self.pairwise_state_dim // self.pairwise_head_width
if self.sequence_state_dim != sequence_num_heads * self.sequence_head_width:
raise ValueError(
"""`sequence_state_dim` should be equal to `sequence_num_heads * sequence_head_width, got"""
f''' {self.sequence_state_dim} != {sequence_num_heads} * {self.sequence_head_width}.''' )
if self.pairwise_state_dim != pairwise_num_heads * self.pairwise_head_width:
raise ValueError(
"""`pairwise_state_dim` should be equal to `pairwise_num_heads * pairwise_head_width, got"""
f''' {self.pairwise_state_dim} != {pairwise_num_heads} * {self.pairwise_head_width}.''' )
if self.pairwise_state_dim % 2 != 0:
raise ValueError(f'''`pairwise_state_dim` should be even, got {self.pairwise_state_dim}.''' )
if self.dropout >= 0.4:
raise ValueError(f'''`dropout` should not be greater than 0.4, got {self.dropout}.''' )
def snake_case_ ( self ) -> List[str]:
'''simple docstring'''
A_ = asdict(self )
A_ = self.structure_module.to_dict()
return output
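# Example of the head-width constraint enforced above (hypothetical values):
# sequence_state_dim=1024 with sequence_head_width=32 yields 32 attention heads
# (32 * 32 == 1024), whereas sequence_state_dim=1000 would raise a ValueError
# because 31 * 32 == 992 != 1000.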
@dataclass
class A__ :
lowercase = 384
lowercase = 128
lowercase = 16
lowercase = 128
lowercase = 12
lowercase = 4
lowercase = 8
lowercase = 0.1
lowercase = 8
lowercase = 1
lowercase = 2
lowercase = 7
lowercase = 10
lowercase = 1e-8
lowercase = 1e5
def snake_case_ ( self ) -> Any:
'''simple docstring'''
return asdict(self )
def UpperCAmelCase__ ( ) -> List[str]:
return (
"<cls>",
"<pad>",
"<eos>",
"<unk>",
"L",
"A",
"G",
"V",
"S",
"E",
"R",
"T",
"I",
"D",
"P",
"K",
"Q",
"N",
"F",
"Y",
"M",
"H",
"W",
"C",
"X",
"B",
"U",
"Z",
"O",
".",
"-",
"<null_1>",
"<mask>",
)
| 667 |
'''simple docstring'''
def UpperCAmelCase__ ( UpperCAmelCase__ ) -> bool:
return str(UpperCAmelCase__ ) == str(UpperCAmelCase__ )[::-1]
def UpperCAmelCase__ ( UpperCAmelCase__ ) -> int:
return int(UpperCAmelCase__ ) + int(str(UpperCAmelCase__ )[::-1] )
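# Quick illustration of the two helpers above (descriptive names used for
# readability; both are obfuscated to UpperCAmelCase__): sum_reverse(47) ==
# 47 + 74 == 121 and is_palindrome(121) is True, so 47 converges after one
# reverse-and-add step and is not a Lychrel number.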
def UpperCAmelCase__ ( UpperCAmelCase__ = 1_00_00 ) -> int:
A_ = []
for num in range(1, UpperCAmelCase__ ):
A_ = 0
A_ = num
while iterations < 50:
A_ = sum_reverse(UpperCAmelCase__ )
iterations += 1
if is_palindrome(UpperCAmelCase__ ):
break
else:
lychrel_nums.append(UpperCAmelCase__ )
return len(UpperCAmelCase__ )
if __name__ == "__main__":
print(f"""{solution() = }""")
| 667 | 1 |
'''simple docstring'''
class A__ : # Public class to implement a graph
def __init__( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) -> None:
'''simple docstring'''
A_ = row
A_ = col
A_ = graph
def snake_case_ ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) -> bool:
'''simple docstring'''
return (
0 <= i < self.ROW
and 0 <= j < self.COL
and not visited[i][j]
and self.graph[i][j]
)
def snake_case_ ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) -> None:
'''simple docstring'''
# Checking all 8 elements surrounding nth element
A_ = [-1, -1, -1, 0, 0, 1, 1, 1] # Coordinate order
A_ = [-1, 0, 1, -1, 1, -1, 0, 1]
A_ = True # Make those cells visited
for k in range(8 ):
if self.is_safe(i + row_nbr[k] , j + col_nbr[k] , UpperCamelCase__ ):
self.diffs(i + row_nbr[k] , j + col_nbr[k] , UpperCamelCase__ )
def snake_case_ ( self ) -> int: # And finally, count all islands.
'''simple docstring'''
A_ = [[False for j in range(self.COL )] for i in range(self.ROW )]
A_ = 0
for i in range(self.ROW ):
for j in range(self.COL ):
if visited[i][j] is False and self.graph[i][j] == 1:
self.diffs(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
count += 1
return count
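# Usage sketch (hypothetical grid; the class and method names above are
# obfuscated, so this stays illustrative): for row=3, col=3 and
# graph=[[1, 1, 0], [0, 1, 0], [0, 0, 1]], the island counter reports 2 --
# the three adjacent 1s form one island under 8-connectivity and the lone
# bottom-right 1 forms the other.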
| 667 |
'''simple docstring'''
import argparse
import json
from typing import List
from ltp import LTP
from transformers.models.bert.tokenization_bert import BertTokenizer
def UpperCAmelCase__ ( UpperCAmelCase__ ) -> Optional[int]:
# This defines a "chinese character" as anything in the CJK Unicode block:
# https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
#
# Note that the CJK Unicode block is NOT all Japanese and Korean characters,
# despite its name. The modern Korean Hangul alphabet is a different block,
# as is Japanese Hiragana and Katakana. Those alphabets are used to write
# space-separated words, so they are not treated specially and handled
    # like all of the other languages.
if (
(cp >= 0X4E_00 and cp <= 0X9F_FF)
or (cp >= 0X34_00 and cp <= 0X4D_BF) #
or (cp >= 0X2_00_00 and cp <= 0X2_A6_DF) #
or (cp >= 0X2_A7_00 and cp <= 0X2_B7_3F) #
or (cp >= 0X2_B7_40 and cp <= 0X2_B8_1F) #
or (cp >= 0X2_B8_20 and cp <= 0X2_CE_AF) #
or (cp >= 0XF9_00 and cp <= 0XFA_FF)
or (cp >= 0X2_F8_00 and cp <= 0X2_FA_1F) #
): #
return True
return False
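# Example values for the predicate above: code point 0x4E2D (U+4E2D, "中") lies
# in the main CJK block and returns True, while 0x61 ("a") returns False.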
def UpperCAmelCase__ ( UpperCAmelCase__ ) -> List[Any]:
# word like '180' or '身高' or '神'
for char in word:
A_ = ord(UpperCAmelCase__ )
if not _is_chinese_char(UpperCAmelCase__ ):
return 0
return 1
def UpperCAmelCase__ ( UpperCAmelCase__ ) -> int:
A_ = set()
for token in tokens:
A_ = len(UpperCAmelCase__ ) > 1 and is_chinese(UpperCAmelCase__ )
if chinese_word:
word_set.add(UpperCAmelCase__ )
A_ = list(UpperCAmelCase__ )
return word_list
def UpperCAmelCase__ ( UpperCAmelCase__, UpperCAmelCase__ ) -> Dict:
if not chinese_word_set:
return bert_tokens
A_ = max([len(UpperCAmelCase__ ) for w in chinese_word_set] )
A_ = bert_tokens
A_ , A_ = 0, len(UpperCAmelCase__ )
while start < end:
A_ = True
if is_chinese(bert_word[start] ):
A_ = min(end - start, UpperCAmelCase__ )
for i in range(UpperCAmelCase__, 1, -1 ):
A_ = """""".join(bert_word[start : start + i] )
if whole_word in chinese_word_set:
for j in range(start + 1, start + i ):
A_ = """##""" + bert_word[j]
A_ = start + i
A_ = False
break
if single_word:
start += 1
return bert_word
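# Illustration of the whole-word marking above (hypothetical inputs): with
# bert_tokens=["身", "高", "180"] and chinese_word_set={"身高"}, the result is
# ["身", "##高", "180"] -- the trailing character of the matched word gets the
# "##" prefix so whole-word masking can treat "身高" as a single unit.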
def UpperCAmelCase__ ( UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__ ) -> Dict:
A_ = []
for i in range(0, len(UpperCAmelCase__ ), 1_00 ):
A_ = ltp_tokenizer.pipeline(lines[i : i + 1_00], tasks=["""cws"""] ).cws
A_ = [get_chinese_word(UpperCAmelCase__ ) for r in res]
ltp_res.extend(UpperCAmelCase__ )
assert len(UpperCAmelCase__ ) == len(UpperCAmelCase__ )
A_ = []
for i in range(0, len(UpperCAmelCase__ ), 1_00 ):
A_ = bert_tokenizer(lines[i : i + 1_00], add_special_tokens=UpperCAmelCase__, truncation=UpperCAmelCase__, max_length=5_12 )
bert_res.extend(res["""input_ids"""] )
assert len(UpperCAmelCase__ ) == len(UpperCAmelCase__ )
A_ = []
for input_ids, chinese_word in zip(UpperCAmelCase__, UpperCAmelCase__ ):
A_ = []
for id in input_ids:
A_ = bert_tokenizer._convert_id_to_token(UpperCAmelCase__ )
input_tokens.append(UpperCAmelCase__ )
A_ = add_sub_symbol(UpperCAmelCase__, UpperCAmelCase__ )
A_ = []
# We only save pos of chinese subwords start with ##, which mean is part of a whole word.
for i, token in enumerate(UpperCAmelCase__ ):
if token[:2] == "##":
A_ = token[2:]
# save chinese tokens' pos
if len(UpperCAmelCase__ ) == 1 and _is_chinese_char(ord(UpperCAmelCase__ ) ):
ref_id.append(UpperCAmelCase__ )
ref_ids.append(UpperCAmelCase__ )
assert len(UpperCAmelCase__ ) == len(UpperCAmelCase__ )
return ref_ids
def UpperCAmelCase__ ( UpperCAmelCase__ ) -> Optional[Any]:
    # For Chinese (Ro)BERT, the best result is from : RoBERTa-wwm-ext (https://github.com/ymcui/Chinese-BERT-wwm)
    # If we want to fine-tune these models, we have to use the same tokenizer : LTP (https://github.com/HIT-SCIR/ltp)
with open(args.file_name, """r""", encoding="""utf-8""" ) as f:
A_ = f.readlines()
A_ = [line.strip() for line in data if len(UpperCAmelCase__ ) > 0 and not line.isspace()] # avoid delimiter like '\u2029'
A_ = LTP(args.ltp ) # faster in GPU device
A_ = BertTokenizer.from_pretrained(args.bert )
A_ = prepare_ref(UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__ )
with open(args.save_path, """w""", encoding="""utf-8""" ) as f:
A_ = [json.dumps(UpperCAmelCase__ ) + """\n""" for ref in ref_ids]
f.writelines(UpperCAmelCase__ )
if __name__ == "__main__":
__lowerCamelCase = argparse.ArgumentParser(description='''prepare_chinese_ref''')
parser.add_argument(
'''--file_name''',
required=False,
type=str,
default='''./resources/chinese-demo.txt''',
help='''file need process, same as training data in lm''',
)
parser.add_argument(
'''--ltp''',
required=False,
type=str,
default='''./resources/ltp''',
help='''resources for LTP tokenizer, usually a path''',
)
parser.add_argument(
'''--bert''',
required=False,
type=str,
default='''./resources/robert''',
help='''resources for Bert tokenizer''',
)
parser.add_argument(
'''--save_path''',
required=False,
type=str,
default='''./resources/ref.txt''',
help='''path to save res''',
)
__lowerCamelCase = parser.parse_args()
main(args)
| 667 | 1 |
'''simple docstring'''
import argparse
import torch
from transformers import FunnelBaseModel, FunnelConfig, FunnelModel, load_tf_weights_in_funnel
from transformers.utils import logging
logging.set_verbosity_info()
def UpperCAmelCase__ ( UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__ ) -> Optional[Any]:
# Initialise PyTorch model
A_ = FunnelConfig.from_json_file(UpperCAmelCase__ )
print(F'''Building PyTorch model from configuration: {config}''' )
A_ = FunnelBaseModel(UpperCAmelCase__ ) if base_model else FunnelModel(UpperCAmelCase__ )
# Load weights from tf checkpoint
load_tf_weights_in_funnel(UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__ )
# Save pytorch-model
print(F'''Save PyTorch model to {pytorch_dump_path}''' )
torch.save(model.state_dict(), UpperCAmelCase__ )
if __name__ == "__main__":
__lowerCamelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help='''The config json file corresponding to the pre-trained model. \nThis specifies the model architecture.''',
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--base_model''', action='''store_true''', help='''Whether you want just the base model (no decoder) or not.'''
)
__lowerCamelCase = parser.parse_args()
convert_tf_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path, args.base_model
)
| 667 |
'''simple docstring'''
from __future__ import annotations
import math
def UpperCAmelCase__ ( UpperCAmelCase__ ) -> bool:
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
    # All prime numbers greater than 3 are of the form 6k +/- 1
for i in range(5, int(math.sqrt(UpperCAmelCase__ ) + 1 ), 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
__lowerCamelCase = [num for num in range(3, 10_0001, 2) if not is_prime(num)]
def UpperCAmelCase__ ( UpperCAmelCase__ ) -> list[int]:
if not isinstance(UpperCAmelCase__, UpperCAmelCase__ ):
raise ValueError("""n must be an integer""" )
if n <= 0:
raise ValueError("""n must be >= 0""" )
A_ = []
for num in range(len(UpperCAmelCase__ ) ):
A_ = 0
while 2 * i * i <= odd_composites[num]:
A_ = odd_composites[num] - 2 * i * i
if is_prime(UpperCAmelCase__ ):
break
i += 1
else:
list_nums.append(odd_composites[num] )
if len(UpperCAmelCase__ ) == n:
return list_nums
return []
def UpperCAmelCase__ ( ) -> int:
return compute_nums(1 )[0]
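# Known result (Project Euler 46): the smallest odd composite that cannot be
# written as a prime plus twice a square is 5777, so solution() is expected to
# return 5777.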
if __name__ == "__main__":
print(f"""{solution() = }""")
| 667 | 1 |
'''simple docstring'''
__lowerCamelCase = range(2, 20 + 1)
__lowerCamelCase = [10**k for k in range(ks[-1] + 1)]
__lowerCamelCase = {}
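# Sketch of the sequence being accelerated below (apparently the digit-sum
# recurrence a(1) = 1, a(n+1) = a(n) + digitsum(a(n)), giving
# 1, 2, 4, 8, 16, 23, 28, 38, 49, ...): the memoised "jumps" let the code skip
# whole runs of terms instead of stepping through all 10**15 of them.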
def UpperCAmelCase__ ( UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__ ) -> Tuple:
A_ = sum(a_i[j] for j in range(UpperCAmelCase__, len(UpperCAmelCase__ ) ) )
A_ = sum(a_i[j] * base[j] for j in range(min(len(UpperCAmelCase__ ), UpperCAmelCase__ ) ) )
A_ , A_ = 0, 0
A_ = n - i
A_ = memo.get(UpperCAmelCase__ )
if sub_memo is not None:
A_ = sub_memo.get(UpperCAmelCase__ )
if jumps is not None and len(UpperCAmelCase__ ) > 0:
# find and make the largest jump without going over
A_ = -1
for _k in range(len(UpperCAmelCase__ ) - 1, -1, -1 ):
if jumps[_k][2] <= k and jumps[_k][1] <= max_dn:
A_ = _k
break
if max_jump >= 0:
A_ , A_ , A_ = jumps[max_jump]
# since the difference between jumps is cached, add c
A_ = diff + c
for j in range(min(UpperCAmelCase__, len(UpperCAmelCase__ ) ) ):
A_ , A_ = divmod(UpperCAmelCase__, 10 )
if new_c > 0:
add(UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__ )
else:
A_ = []
else:
A_ = {c: []}
A_ = sub_memo
if dn >= max_dn or c + diff >= base[k]:
return diff, dn
if k > ks[0]:
while True:
# keep doing smaller jumps
A_ , A_ = next_term(UpperCAmelCase__, k - 1, i + dn, UpperCAmelCase__ )
diff += _diff
dn += terms_jumped
if dn >= max_dn or c + diff >= base[k]:
break
else:
# would be too small a jump, just compute sequential terms instead
A_ , A_ = compute(UpperCAmelCase__, UpperCAmelCase__, i + dn, UpperCAmelCase__ )
diff += _diff
dn += terms_jumped
A_ = sub_memo[c]
# keep jumps sorted by # of terms skipped
A_ = 0
while j < len(UpperCAmelCase__ ):
if jumps[j][1] > dn:
break
j += 1
# cache the jump for this value digitsum(b) and c
sub_memo[c].insert(UpperCAmelCase__, (diff, dn, k) )
return (diff, dn)
def UpperCAmelCase__ ( UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__ ) -> int:
if i >= n:
return 0, i
if k > len(UpperCAmelCase__ ):
a_i.extend([0 for _ in range(k - len(UpperCAmelCase__ ) )] )
# note: a_i -> b * 10^k + c
# ds_b -> digitsum(b)
# ds_c -> digitsum(c)
A_ = i
A_ , A_ , A_ = 0, 0, 0
for j in range(len(UpperCAmelCase__ ) ):
if j >= k:
ds_b += a_i[j]
else:
ds_c += a_i[j]
while i < n:
i += 1
A_ = ds_c + ds_b
diff += addend
A_ = 0
for j in range(UpperCAmelCase__ ):
A_ = a_i[j] + addend
A_ , A_ = divmod(UpperCAmelCase__, 10 )
ds_c += a_i[j]
if addend > 0:
break
if addend > 0:
add(UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__ )
return diff, i - start_i
def UpperCAmelCase__ ( UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__ ) -> str:
for j in range(UpperCAmelCase__, len(UpperCAmelCase__ ) ):
A_ = digits[j] + addend
if s >= 10:
A_ , A_ = divmod(UpperCAmelCase__, 10 )
A_ = addend // 10 + quotient
else:
A_ = s
A_ = addend // 10
if addend == 0:
break
while addend > 0:
A_ , A_ = divmod(UpperCAmelCase__, 10 )
digits.append(UpperCAmelCase__ )
def UpperCAmelCase__ ( UpperCAmelCase__ = 10**15 ) -> int:
A_ = [1]
A_ = 1
A_ = 0
while True:
A_ , A_ = next_term(UpperCAmelCase__, 20, i + dn, UpperCAmelCase__ )
dn += terms_jumped
if dn == n - i:
break
A_ = 0
for j in range(len(UpperCAmelCase__ ) ):
a_n += digits[j] * 10**j
return a_n
if __name__ == "__main__":
print(f"""{solution() = }""")
| 667 |
'''simple docstring'''
def UpperCAmelCase__ ( UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__ = 0, UpperCAmelCase__ = 0 ) -> int:
A_ = right or len(UpperCAmelCase__ ) - 1
if left > right:
return -1
elif list_data[left] == key:
return left
elif list_data[right] == key:
return right
else:
return search(UpperCAmelCase__, UpperCAmelCase__, left + 1, right - 1 )
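# Usage sketch for the recursive search above (hypothetical list): on
# [1, 4, 7, 9] a key of 7 is found at index 2 by shrinking the window from both
# ends one element per call, and a missing key yields -1. Note this is a linear
# two-ended scan, not a binary search, so the input need not be sorted.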
if __name__ == "__main__":
import doctest
doctest.testmod()
| 667 | 1 |
'''simple docstring'''
from typing import Optional
from torch import nn
from .transformer_ad import TransformeraDModel, TransformeraDModelOutput
class A__ ( nn.Module ):
def __init__( self , UpperCamelCase__ = 16 , UpperCamelCase__ = 88 , UpperCamelCase__ = None , UpperCamelCase__ = 1 , UpperCamelCase__ = 0.0 , UpperCamelCase__ = 32 , UpperCamelCase__ = None , UpperCamelCase__ = False , UpperCamelCase__ = None , UpperCamelCase__ = None , UpperCamelCase__ = "geglu" , UpperCamelCase__ = None , ) -> str:
'''simple docstring'''
super().__init__()
A_ = nn.ModuleList(
[
TransformeraDModel(
num_attention_heads=UpperCamelCase__ , attention_head_dim=UpperCamelCase__ , in_channels=UpperCamelCase__ , num_layers=UpperCamelCase__ , dropout=UpperCamelCase__ , norm_num_groups=UpperCamelCase__ , cross_attention_dim=UpperCamelCase__ , attention_bias=UpperCamelCase__ , sample_size=UpperCamelCase__ , num_vector_embeds=UpperCamelCase__ , activation_fn=UpperCamelCase__ , num_embeds_ada_norm=UpperCamelCase__ , )
for _ in range(2 )
] )
# Variables that can be set by a pipeline:
# The ratio of transformer1 to transformer2's output states to be combined during inference
A_ = 0.5
# The shape of `encoder_hidden_states` is expected to be
# `(batch_size, condition_lengths[0]+condition_lengths[1], num_features)`
A_ = [77, 257]
# Which transformer to use to encode which condition.
# E.g. `(1, 0)` means that we'll use `transformers[1](conditions[0])` and `transformers[0](conditions[1])`
A_ = [1, 0]
def snake_case_ ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__=None , UpperCamelCase__=None , UpperCamelCase__=None , UpperCamelCase__ = True , ) -> str:
'''simple docstring'''
A_ = hidden_states
A_ = []
A_ = 0
# attention_mask is not used yet
for i in range(2 ):
# for each of the two transformers, pass the corresponding condition tokens
A_ = encoder_hidden_states[:, tokens_start : tokens_start + self.condition_lengths[i]]
A_ = self.transformer_index_for_condition[i]
A_ = self.transformers[transformer_index](
UpperCamelCase__ , encoder_hidden_states=UpperCamelCase__ , timestep=UpperCamelCase__ , cross_attention_kwargs=UpperCamelCase__ , return_dict=UpperCamelCase__ , )[0]
encoded_states.append(encoded_state - input_states )
tokens_start += self.condition_lengths[i]
A_ = encoded_states[0] * self.mix_ratio + encoded_states[1] * (1 - self.mix_ratio)
A_ = output_states + input_states
if not return_dict:
return (output_states,)
return TransformeraDModelOutput(sample=UpperCamelCase__ )
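# Reading of the conditioning layout above (as configured in __init__): along
# dim 1, encoder_hidden_states is expected to carry 77 text-condition tokens
# followed by 257 image-condition tokens; transformer_index_for_condition routes
# the first slice through transformers[1] and the second through
# transformers[0], and the two outputs are blended with mix_ratio.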
| 667 |
'''simple docstring'''
import os
import time
import pytest
from datasets.utils.filelock import FileLock, Timeout
def UpperCAmelCase__ ( UpperCAmelCase__ ) -> int:
A_ = FileLock(str(tmpdir / """foo.lock""" ) )
A_ = FileLock(str(tmpdir / """foo.lock""" ) )
A_ = 0.01
with locka.acquire():
with pytest.raises(UpperCAmelCase__ ):
A_ = time.time()
locka.acquire(UpperCAmelCase__ )
assert time.time() - _start > timeout
def UpperCAmelCase__ ( UpperCAmelCase__ ) -> Dict:
A_ = """a""" * 10_00 + """.lock"""
A_ = FileLock(str(tmpdir / filename ) )
assert locka._lock_file.endswith(""".lock""" )
assert not locka._lock_file.endswith(UpperCAmelCase__ )
assert len(os.path.basename(locka._lock_file ) ) <= 2_55
A_ = FileLock(tmpdir / filename )
with locka.acquire():
with pytest.raises(UpperCAmelCase__ ):
locka.acquire(0 )
| 667 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
if is_sentencepiece_available():
from ..ta.tokenization_ta import TaTokenizer
else:
from ...utils.dummy_sentencepiece_objects import TaTokenizer
__lowerCamelCase = TaTokenizer
if is_tokenizers_available():
from ..ta.tokenization_ta_fast import TaTokenizerFast
else:
from ...utils.dummy_tokenizers_objects import TaTokenizerFast
__lowerCamelCase = TaTokenizerFast
__lowerCamelCase = {'''configuration_mt5''': ['''MT5Config''', '''MT5OnnxConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase = [
'''MT5EncoderModel''',
'''MT5ForConditionalGeneration''',
'''MT5ForQuestionAnswering''',
'''MT5Model''',
'''MT5PreTrainedModel''',
'''MT5Stack''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase = ['''TFMT5EncoderModel''', '''TFMT5ForConditionalGeneration''', '''TFMT5Model''']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase = ['''FlaxMT5EncoderModel''', '''FlaxMT5ForConditionalGeneration''', '''FlaxMT5Model''']
if TYPE_CHECKING:
from .configuration_mta import MTaConfig, MTaOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mta import (
MTaEncoderModel,
MTaForConditionalGeneration,
MTaForQuestionAnswering,
MTaModel,
MTaPreTrainedModel,
MTaStack,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mta import TFMTaEncoderModel, TFMTaForConditionalGeneration, TFMTaModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_mta import FlaxMTaEncoderModel, FlaxMTaForConditionalGeneration, FlaxMTaModel
else:
import sys
__lowerCamelCase = _LazyModule(
__name__,
globals()['''__file__'''],
_import_structure,
extra_objects={'''MT5Tokenizer''': MTaTokenizer, '''MT5TokenizerFast''': MTaTokenizerFast},
module_spec=__spec__,
)
| 667 |
'''simple docstring'''
from dataclasses import dataclass
from typing import Optional
import numpy as np
import torch
import torch.nn as nn
from ..utils import BaseOutput, is_torch_version, randn_tensor
from .attention_processor import SpatialNorm
from .unet_ad_blocks import UNetMidBlockaD, get_down_block, get_up_block
@dataclass
class A__ ( _snake_case ):
lowercase = 42
class A__ ( nn.Module ):
def __init__( self , UpperCamelCase__=3 , UpperCamelCase__=3 , UpperCamelCase__=("DownEncoderBlock2D",) , UpperCamelCase__=(64,) , UpperCamelCase__=2 , UpperCamelCase__=32 , UpperCamelCase__="silu" , UpperCamelCase__=True , ) -> Union[str, Any]:
'''simple docstring'''
super().__init__()
A_ = layers_per_block
A_ = torch.nn.Convad(
UpperCamelCase__ , block_out_channels[0] , kernel_size=3 , stride=1 , padding=1 , )
A_ = None
A_ = nn.ModuleList([] )
# down
A_ = block_out_channels[0]
for i, down_block_type in enumerate(UpperCamelCase__ ):
A_ = output_channel
A_ = block_out_channels[i]
A_ = i == len(UpperCamelCase__ ) - 1
A_ = get_down_block(
UpperCamelCase__ , num_layers=self.layers_per_block , in_channels=UpperCamelCase__ , out_channels=UpperCamelCase__ , add_downsample=not is_final_block , resnet_eps=1e-6 , downsample_padding=0 , resnet_act_fn=UpperCamelCase__ , resnet_groups=UpperCamelCase__ , attention_head_dim=UpperCamelCase__ , temb_channels=UpperCamelCase__ , )
self.down_blocks.append(UpperCamelCase__ )
# mid
A_ = UNetMidBlockaD(
in_channels=block_out_channels[-1] , resnet_eps=1e-6 , resnet_act_fn=UpperCamelCase__ , output_scale_factor=1 , resnet_time_scale_shift="""default""" , attention_head_dim=block_out_channels[-1] , resnet_groups=UpperCamelCase__ , temb_channels=UpperCamelCase__ , )
# out
A_ = nn.GroupNorm(num_channels=block_out_channels[-1] , num_groups=UpperCamelCase__ , eps=1e-6 )
A_ = nn.SiLU()
A_ = 2 * out_channels if double_z else out_channels
A_ = nn.Convad(block_out_channels[-1] , UpperCamelCase__ , 3 , padding=1 )
A_ = False
def snake_case_ ( self , UpperCamelCase__ ) -> str:
'''simple docstring'''
A_ = x
A_ = self.conv_in(UpperCamelCase__ )
if self.training and self.gradient_checkpointing:
def create_custom_forward(UpperCamelCase__ ):
def custom_forward(*UpperCamelCase__ ):
return module(*UpperCamelCase__ )
return custom_forward
# down
if is_torch_version(""">=""" , """1.11.0""" ):
for down_block in self.down_blocks:
A_ = torch.utils.checkpoint.checkpoint(
create_custom_forward(UpperCamelCase__ ) , UpperCamelCase__ , use_reentrant=UpperCamelCase__ )
# middle
A_ = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) , UpperCamelCase__ , use_reentrant=UpperCamelCase__ )
else:
for down_block in self.down_blocks:
A_ = torch.utils.checkpoint.checkpoint(create_custom_forward(UpperCamelCase__ ) , UpperCamelCase__ )
# middle
A_ = torch.utils.checkpoint.checkpoint(create_custom_forward(self.mid_block ) , UpperCamelCase__ )
else:
# down
for down_block in self.down_blocks:
A_ = down_block(UpperCamelCase__ )
# middle
A_ = self.mid_block(UpperCamelCase__ )
# post-process
A_ = self.conv_norm_out(UpperCamelCase__ )
A_ = self.conv_act(UpperCamelCase__ )
A_ = self.conv_out(UpperCamelCase__ )
return sample
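# Shape sketch for the encoder above (hypothetical sizes): with in_channels=3,
# out_channels=4, block_out_channels=(32, 64) and double_z=True, an input of
# shape (B, 3, 64, 64) is downsampled once (only non-final blocks add a
# downsampler) and conv_out doubles the channels, giving (B, 8, 32, 32).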
class A__ ( nn.Module ):
def __init__( self , UpperCamelCase__=3 , UpperCamelCase__=3 , UpperCamelCase__=("UpDecoderBlock2D",) , UpperCamelCase__=(64,) , UpperCamelCase__=2 , UpperCamelCase__=32 , UpperCamelCase__="silu" , UpperCamelCase__="group" , ) -> List[Any]:
'''simple docstring'''
super().__init__()
A_ = layers_per_block
A_ = nn.Convad(
UpperCamelCase__ , block_out_channels[-1] , kernel_size=3 , stride=1 , padding=1 , )
A_ = None
A_ = nn.ModuleList([] )
A_ = in_channels if norm_type == """spatial""" else None
# mid
A_ = UNetMidBlockaD(
in_channels=block_out_channels[-1] , resnet_eps=1e-6 , resnet_act_fn=UpperCamelCase__ , output_scale_factor=1 , resnet_time_scale_shift="""default""" if norm_type == """group""" else norm_type , attention_head_dim=block_out_channels[-1] , resnet_groups=UpperCamelCase__ , temb_channels=UpperCamelCase__ , )
# up
A_ = list(reversed(UpperCamelCase__ ) )
A_ = reversed_block_out_channels[0]
for i, up_block_type in enumerate(UpperCamelCase__ ):
A_ = output_channel
A_ = reversed_block_out_channels[i]
A_ = i == len(UpperCamelCase__ ) - 1
A_ = get_up_block(
UpperCamelCase__ , num_layers=self.layers_per_block + 1 , in_channels=UpperCamelCase__ , out_channels=UpperCamelCase__ , prev_output_channel=UpperCamelCase__ , add_upsample=not is_final_block , resnet_eps=1e-6 , resnet_act_fn=UpperCamelCase__ , resnet_groups=UpperCamelCase__ , attention_head_dim=UpperCamelCase__ , temb_channels=UpperCamelCase__ , resnet_time_scale_shift=UpperCamelCase__ , )
self.up_blocks.append(UpperCamelCase__ )
A_ = output_channel
# out
if norm_type == "spatial":
A_ = SpatialNorm(block_out_channels[0] , UpperCamelCase__ )
else:
A_ = nn.GroupNorm(num_channels=block_out_channels[0] , num_groups=UpperCamelCase__ , eps=1e-6 )
A_ = nn.SiLU()
A_ = nn.Convad(block_out_channels[0] , UpperCamelCase__ , 3 , padding=1 )
A_ = False
def snake_case_ ( self , UpperCamelCase__ , UpperCamelCase__=None ) -> Optional[Any]:
'''simple docstring'''
A_ = z
A_ = self.conv_in(UpperCamelCase__ )
A_ = next(iter(self.up_blocks.parameters() ) ).dtype
if self.training and self.gradient_checkpointing:
def create_custom_forward(UpperCamelCase__ ):
def custom_forward(*UpperCamelCase__ ):
return module(*UpperCamelCase__ )
return custom_forward
if is_torch_version(""">=""" , """1.11.0""" ):
# middle
A_ = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) , UpperCamelCase__ , UpperCamelCase__ , use_reentrant=UpperCamelCase__ )
A_ = sample.to(UpperCamelCase__ )
# up
for up_block in self.up_blocks:
A_ = torch.utils.checkpoint.checkpoint(
create_custom_forward(UpperCamelCase__ ) , UpperCamelCase__ , UpperCamelCase__ , use_reentrant=UpperCamelCase__ )
else:
# middle
A_ = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) , UpperCamelCase__ , UpperCamelCase__ )
A_ = sample.to(UpperCamelCase__ )
# up
for up_block in self.up_blocks:
A_ = torch.utils.checkpoint.checkpoint(create_custom_forward(UpperCamelCase__ ) , UpperCamelCase__ , UpperCamelCase__ )
else:
# middle
A_ = self.mid_block(UpperCamelCase__ , UpperCamelCase__ )
A_ = sample.to(UpperCamelCase__ )
# up
for up_block in self.up_blocks:
A_ = up_block(UpperCamelCase__ , UpperCamelCase__ )
# post-process
if latent_embeds is None:
A_ = self.conv_norm_out(UpperCamelCase__ )
else:
A_ = self.conv_norm_out(UpperCamelCase__ , UpperCamelCase__ )
A_ = self.conv_act(UpperCamelCase__ )
A_ = self.conv_out(UpperCamelCase__ )
return sample
class A__ ( nn.Module ):
def __init__( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__=None , UpperCamelCase__="random" , UpperCamelCase__=False , UpperCamelCase__=True ) -> str:
'''simple docstring'''
super().__init__()
A_ = n_e
A_ = vq_embed_dim
A_ = beta
A_ = legacy
A_ = nn.Embedding(self.n_e , self.vq_embed_dim )
self.embedding.weight.data.uniform_(-1.0 / self.n_e , 1.0 / self.n_e )
A_ = remap
if self.remap is not None:
self.register_buffer("""used""" , torch.tensor(np.load(self.remap ) ) )
A_ = self.used.shape[0]
A_ = unknown_index # "random" or "extra" or integer
if self.unknown_index == "extra":
A_ = self.re_embed
A_ = self.re_embed + 1
print(
f'''Remapping {self.n_e} indices to {self.re_embed} indices. '''
f'''Using {self.unknown_index} for unknown indices.''' )
else:
A_ = n_e
A_ = sane_index_shape
def snake_case_ ( self , UpperCamelCase__ ) -> Optional[Any]:
'''simple docstring'''
A_ = inds.shape
assert len(UpperCamelCase__ ) > 1
A_ = inds.reshape(ishape[0] , -1 )
A_ = self.used.to(UpperCamelCase__ )
A_ = (inds[:, :, None] == used[None, None, ...]).long()
A_ = match.argmax(-1 )
A_ = match.sum(2 ) < 1
if self.unknown_index == "random":
A_ = torch.randint(0 , self.re_embed , size=new[unknown].shape ).to(device=new.device )
else:
A_ = self.unknown_index
return new.reshape(UpperCamelCase__ )
def snake_case_ ( self , UpperCamelCase__ ) -> Dict:
'''simple docstring'''
A_ = inds.shape
assert len(UpperCamelCase__ ) > 1
A_ = inds.reshape(ishape[0] , -1 )
A_ = self.used.to(UpperCamelCase__ )
if self.re_embed > self.used.shape[0]: # extra token
A_ = 0 # simply set to zero
A_ = torch.gather(used[None, :][inds.shape[0] * [0], :] , 1 , UpperCamelCase__ )
return back.reshape(UpperCamelCase__ )
def snake_case_ ( self , UpperCamelCase__ ) -> str:
'''simple docstring'''
# reshape z -> (batch, height, width, channel) and flatten
A_ = z.permute(0 , 2 , 3 , 1 ).contiguous()
A_ = z.view(-1 , self.vq_embed_dim )
# distances from z to embeddings e_j (z - e)^2 = z^2 + e^2 - 2 e * z
A_ = torch.argmin(torch.cdist(UpperCamelCase__ , self.embedding.weight ) , dim=1 )
A_ = self.embedding(UpperCamelCase__ ).view(z.shape )
A_ = None
A_ = None
# compute loss for embedding
if not self.legacy:
A_ = self.beta * torch.mean((z_q.detach() - z) ** 2 ) + torch.mean((z_q - z.detach()) ** 2 )
else:
A_ = torch.mean((z_q.detach() - z) ** 2 ) + self.beta * torch.mean((z_q - z.detach()) ** 2 )
# preserve gradients
A_ = z + (z_q - z).detach()
# reshape back to match original input shape
A_ = z_q.permute(0 , 3 , 1 , 2 ).contiguous()
if self.remap is not None:
A_ = min_encoding_indices.reshape(z.shape[0] , -1 ) # add batch axis
A_ = self.remap_to_used(UpperCamelCase__ )
A_ = min_encoding_indices.reshape(-1 , 1 ) # flatten
if self.sane_index_shape:
A_ = min_encoding_indices.reshape(z_q.shape[0] , z_q.shape[2] , z_q.shape[3] )
return z_q, loss, (perplexity, min_encodings, min_encoding_indices)
def snake_case_ ( self , UpperCamelCase__ , UpperCamelCase__ ) -> Optional[int]:
'''simple docstring'''
# shape specifying (batch, height, width, channel)
if self.remap is not None:
A_ = indices.reshape(shape[0] , -1 ) # add batch axis
A_ = self.unmap_to_all(UpperCamelCase__ )
A_ = indices.reshape(-1 ) # flatten again
# get quantized latent vectors
A_ = self.embedding(UpperCamelCase__ )
if shape is not None:
A_ = z_q.view(UpperCamelCase__ )
# reshape back to match original input shape
A_ = z_q.permute(0 , 3 , 1 , 2 ).contiguous()
return z_q
class A__ ( _snake_case ):
def __init__( self , UpperCamelCase__ , UpperCamelCase__=False ) -> Dict:
'''simple docstring'''
A_ = parameters
A_ , A_ = torch.chunk(UpperCamelCase__ , 2 , dim=1 )
A_ = torch.clamp(self.logvar , -30.0 , 20.0 )
A_ = deterministic
A_ = torch.exp(0.5 * self.logvar )
A_ = torch.exp(self.logvar )
if self.deterministic:
A_ = A_ = torch.zeros_like(
self.mean , device=self.parameters.device , dtype=self.parameters.dtype )
def snake_case_ ( self , UpperCamelCase__ = None ) -> torch.FloatTensor:
'''simple docstring'''
# make sure sample is on the same device as the parameters and has same dtype
A_ = randn_tensor(
self.mean.shape , generator=UpperCamelCase__ , device=self.parameters.device , dtype=self.parameters.dtype )
A_ = self.mean + self.std * sample
return x
def snake_case_ ( self , UpperCamelCase__=None ) -> int:
'''simple docstring'''
if self.deterministic:
return torch.Tensor([0.0] )
else:
if other is None:
return 0.5 * torch.sum(torch.pow(self.mean , 2 ) + self.var - 1.0 - self.logvar , dim=[1, 2, 3] )
else:
return 0.5 * torch.sum(
torch.pow(self.mean - other.mean , 2 ) / other.var
+ self.var / other.var
- 1.0
- self.logvar
+ other.logvar , dim=[1, 2, 3] , )
def snake_case_ ( self , UpperCamelCase__ , UpperCamelCase__=[1, 2, 3] ) -> Optional[Any]:
'''simple docstring'''
if self.deterministic:
return torch.Tensor([0.0] )
A_ = np.log(2.0 * np.pi )
return 0.5 * torch.sum(logtwopi + self.logvar + torch.pow(sample - self.mean , 2 ) / self.var , dim=UpperCamelCase__ )
def snake_case_ ( self ) -> List[Any]:
'''simple docstring'''
return self.mean
| 667 | 1 |
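# A minimal, de-obfuscated sketch of the diagonal Gaussian posterior implemented
# above: the encoder output is split into mean and log-variance, sample() uses the
# reparameterization trick, and kl() against a standard normal is
# 0.5 * sum(mu^2 + var - 1 - logvar). Class and variable names here are
# illustrative, not the library's API.
import torch

class DiagonalGaussian:
    def __init__(self, parameters: torch.Tensor):
        # parameters: (batch, 2 * latent_channels, height, width)
        self.mean, self.logvar = torch.chunk(parameters, 2, dim=1)
        self.logvar = torch.clamp(self.logvar, -30.0, 20.0)  # numerical safety
        self.std = torch.exp(0.5 * self.logvar)
        self.var = torch.exp(self.logvar)

    def sample(self) -> torch.Tensor:
        # reparameterization trick: z = mu + sigma * eps, eps ~ N(0, I)
        return self.mean + self.std * torch.randn_like(self.mean)

    def kl(self) -> torch.Tensor:
        # KL(N(mu, var) || N(0, I)), summed over the latent dimensions
        return 0.5 * torch.sum(self.mean**2 + self.var - 1.0 - self.logvar, dim=[1, 2, 3])

posterior = DiagonalGaussian(torch.randn(2, 8, 4, 4))  # 4 latent channels
print(posterior.sample().shape, posterior.kl().shape)  # torch.Size([2, 4, 4, 4]) torch.Size([2])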
'''simple docstring'''
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.activations import gelu_new, gelu_python, get_activation
@require_torch
class A__ ( unittest.TestCase ):
def snake_case_ ( self ) -> Union[str, Any]:
'''simple docstring'''
A_ = torch.tensor([-100, -1, -0.1, 0, 0.1, 1.0, 100] )
A_ = get_activation("""gelu""" )
self.assertTrue(torch.allclose(gelu_python(UpperCamelCase__ ) , torch_builtin(UpperCamelCase__ ) ) )
self.assertFalse(torch.allclose(gelu_python(UpperCamelCase__ ) , gelu_new(UpperCamelCase__ ) ) )
def snake_case_ ( self ) -> Any:
'''simple docstring'''
A_ = torch.tensor([-100, -1, -0.1, 0, 0.1, 1.0, 100] )
A_ = get_activation("""gelu""" )
A_ = get_activation("""gelu_10""" )
A_ = torch_builtin(UpperCamelCase__ )
A_ = geluaa(UpperCamelCase__ )
A_ = torch.where(y_gelu_aa < 10.0 , 1 , 0 )
self.assertTrue(torch.max(UpperCamelCase__ ).item() == 10.0 )
self.assertTrue(torch.allclose(y_gelu * clipped_mask , y_gelu_aa * clipped_mask ) )
def snake_case_ ( self ) -> int:
'''simple docstring'''
get_activation("""gelu""" )
get_activation("""gelu_10""" )
get_activation("""gelu_fast""" )
get_activation("""gelu_new""" )
get_activation("""gelu_python""" )
get_activation("""gelu_pytorch_tanh""" )
get_activation("""linear""" )
get_activation("""mish""" )
get_activation("""quick_gelu""" )
get_activation("""relu""" )
get_activation("""sigmoid""" )
get_activation("""silu""" )
get_activation("""swish""" )
get_activation("""tanh""" )
with self.assertRaises(UpperCamelCase__ ):
get_activation("""bogus""" )
with self.assertRaises(UpperCamelCase__ ):
get_activation(UpperCamelCase__ )
def snake_case_ ( self ) -> Any:
'''simple docstring'''
A_ = get_activation("""gelu""" )
A_ = 1
A_ = get_activation("""gelu""" )
self.assertEqual(acta.a , 1 )
with self.assertRaises(UpperCamelCase__ ):
A_ = acta.a
| 667 |
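# A minimal sketch of what the test above exercises: exact (erf-based) GELU
# versus the "gelu_new" tanh approximation. Function names here are
# illustrative, not the transformers activation registry.
import math
import torch

def gelu_exact(x: torch.Tensor) -> torch.Tensor:
    return x * 0.5 * (1.0 + torch.erf(x / math.sqrt(2.0)))  # x * Phi(x)

def gelu_tanh(x: torch.Tensor) -> torch.Tensor:
    # Hendrycks & Gimpel tanh approximation
    return 0.5 * x * (1.0 + torch.tanh(math.sqrt(2.0 / math.pi) * (x + 0.044715 * x**3)))

x = torch.tensor([-100.0, -1.0, -0.1, 0.0, 0.1, 1.0, 100.0])
print(torch.allclose(gelu_exact(x), torch.nn.functional.gelu(x)))  # True
print(torch.allclose(gelu_exact(x), gelu_tanh(x)))  # False: close, but outside default tolerance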
'''simple docstring'''
import argparse
import json
import os
from collections import OrderedDict
import torch
from transformers import LukeConfig, LukeForMaskedLM, MLukeTokenizer, XLMRobertaTokenizer
from transformers.tokenization_utils_base import AddedToken
@torch.no_grad()
def convert_luke_checkpoint ( UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__ ) -> List[Any]:
# Load configuration defined in the metadata file
with open(UpperCAmelCase__ ) as metadata_file:
A_ = json.load(UpperCAmelCase__ )
A_ = LukeConfig(use_entity_aware_attention=UpperCAmelCase__, **metadata["""model_config"""] )
# Load in the weights from the checkpoint_path
A_ = torch.load(UpperCAmelCase__, map_location="""cpu""" )["""module"""]
# Load the entity vocab file
A_ = load_original_entity_vocab(UpperCAmelCase__ )
# add an entry for [MASK2]
A_ = max(entity_vocab.values() ) + 1
config.entity_vocab_size += 1
A_ = XLMRobertaTokenizer.from_pretrained(metadata["""model_config"""]["""bert_model_name"""] )
# Add special tokens to the token vocabulary for downstream tasks
A_ = AddedToken("""<ent>""", lstrip=UpperCAmelCase__, rstrip=UpperCAmelCase__ )
A_ = AddedToken("""<ent2>""", lstrip=UpperCAmelCase__, rstrip=UpperCAmelCase__ )
tokenizer.add_special_tokens({"""additional_special_tokens""": [entity_token_a, entity_token_a]} )
config.vocab_size += 2
print(F'''Saving tokenizer to {pytorch_dump_folder_path}''' )
tokenizer.save_pretrained(UpperCAmelCase__ )
with open(os.path.join(UpperCAmelCase__, """tokenizer_config.json""" ), """r""" ) as f:
A_ = json.load(UpperCAmelCase__ )
A_ = """MLukeTokenizer"""
with open(os.path.join(UpperCAmelCase__, """tokenizer_config.json""" ), """w""" ) as f:
json.dump(UpperCAmelCase__, UpperCAmelCase__ )
with open(os.path.join(UpperCAmelCase__, MLukeTokenizer.vocab_files_names["""entity_vocab_file"""] ), """w""" ) as f:
json.dump(UpperCAmelCase__, UpperCAmelCase__ )
A_ = MLukeTokenizer.from_pretrained(UpperCAmelCase__ )
# Initialize the embeddings of the special tokens
A_ = tokenizer.convert_tokens_to_ids(["""@"""] )[0]
A_ = tokenizer.convert_tokens_to_ids(["""#"""] )[0]
A_ = state_dict["""embeddings.word_embeddings.weight"""]
A_ = word_emb[ent_init_index].unsqueeze(0 )
A_ = word_emb[enta_init_index].unsqueeze(0 )
A_ = torch.cat([word_emb, ent_emb, enta_emb] )
# add special tokens for 'entity_predictions.bias'
for bias_name in ["lm_head.decoder.bias", "lm_head.bias"]:
A_ = state_dict[bias_name]
A_ = decoder_bias[ent_init_index].unsqueeze(0 )
A_ = decoder_bias[enta_init_index].unsqueeze(0 )
A_ = torch.cat([decoder_bias, ent_decoder_bias, enta_decoder_bias] )
# Initialize the query layers of the entity-aware self-attention mechanism
for layer_index in range(config.num_hidden_layers ):
for matrix_name in ["query.weight", "query.bias"]:
A_ = F'''encoder.layer.{layer_index}.attention.self.'''
A_ = state_dict[prefix + matrix_name]
A_ = state_dict[prefix + matrix_name]
A_ = state_dict[prefix + matrix_name]
# Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
A_ = state_dict["""entity_embeddings.entity_embeddings.weight"""]
A_ = entity_emb[entity_vocab["""[MASK]"""]].unsqueeze(0 )
A_ = torch.cat([entity_emb, entity_mask_emb] )
# add [MASK2] for 'entity_predictions.bias'
A_ = state_dict["""entity_predictions.bias"""]
A_ = entity_prediction_bias[entity_vocab["""[MASK]"""]].unsqueeze(0 )
A_ = torch.cat([entity_prediction_bias, entity_mask_bias] )
A_ = LukeForMaskedLM(config=UpperCAmelCase__ ).eval()
state_dict.pop("""entity_predictions.decoder.weight""" )
state_dict.pop("""lm_head.decoder.weight""" )
state_dict.pop("""lm_head.decoder.bias""" )
A_ = OrderedDict()
for key, value in state_dict.items():
if not (key.startswith("""lm_head""" ) or key.startswith("""entity_predictions""" )):
A_ = state_dict[key]
else:
A_ = state_dict[key]
A_ , A_ = model.load_state_dict(UpperCAmelCase__, strict=UpperCAmelCase__ )
if set(UpperCAmelCase__ ) != {"luke.embeddings.position_ids"}:
raise ValueError(F'''Unexpected unexpected_keys: {unexpected_keys}''' )
if set(UpperCAmelCase__ ) != {
"lm_head.decoder.weight",
"lm_head.decoder.bias",
"entity_predictions.decoder.weight",
}:
raise ValueError(F'''Unexpected missing_keys: {missing_keys}''' )
model.tie_weights()
assert (model.luke.embeddings.word_embeddings.weight == model.lm_head.decoder.weight).all()
assert (model.luke.entity_embeddings.entity_embeddings.weight == model.entity_predictions.decoder.weight).all()
# Check outputs
A_ = MLukeTokenizer.from_pretrained(UpperCAmelCase__, task="""entity_classification""" )
A_ = """ISO 639-3 uses the code fas for the dialects spoken across Iran and アフガニスタン (Afghanistan)."""
A_ = (0, 9)
A_ = tokenizer(UpperCAmelCase__, entity_spans=[span], return_tensors="""pt""" )
A_ = model(**UpperCAmelCase__ )
# Verify word hidden states
if model_size == "large":
raise NotImplementedError
else: # base
A_ = torch.Size((1, 33, 7_68) )
A_ = torch.tensor([[0.0_892, 0.0_596, -0.2_819], [0.0_134, 0.1_199, 0.0_573], [-0.0_169, 0.0_927, 0.0_644]] )
if not (outputs.last_hidden_state.shape == expected_shape):
raise ValueError(
F'''Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}''' )
if not torch.allclose(outputs.last_hidden_state[0, :3, :3], UpperCAmelCase__, atol=1e-4 ):
raise ValueError
# Verify entity hidden states
if model_size == "large":
raise NotImplementedError
else: # base
A_ = torch.Size((1, 1, 7_68) )
A_ = torch.tensor([[-0.1_482, 0.0_609, 0.0_322]] )
if not (outputs.entity_last_hidden_state.shape == expected_shape):
raise ValueError(
F'''Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is'''
F''' {expected_shape}''' )
if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3], UpperCAmelCase__, atol=1e-4 ):
raise ValueError
# Verify masked word/entity prediction
A_ = MLukeTokenizer.from_pretrained(UpperCAmelCase__ )
A_ = """Tokyo is the capital of <mask>."""
A_ = (24, 30)
A_ = tokenizer(UpperCAmelCase__, entity_spans=[span], return_tensors="""pt""" )
A_ = model(**UpperCAmelCase__ )
A_ = encoding["""input_ids"""][0].tolist()
A_ = input_ids.index(tokenizer.convert_tokens_to_ids("""<mask>""" ) )
A_ = outputs.logits[0][mask_position_id].argmax(dim=-1 )
assert "Japan" == tokenizer.decode(UpperCAmelCase__ )
A_ = outputs.entity_logits[0][0].argmax().item()
A_ = [
entity for entity, entity_id in tokenizer.entity_vocab.items() if entity_id == predicted_entity_id
]
assert [e for e in multilingual_predicted_entities if e.startswith("""en:""" )][0] == "en:Japan"
# Finally, save our PyTorch model and tokenizer
print("""Saving PyTorch model to {}""".format(UpperCAmelCase__ ) )
model.save_pretrained(UpperCAmelCase__ )
def load_original_entity_vocab ( UpperCAmelCase__ ) -> int:
    SPECIAL_TOKENS = ["""[MASK]""", """[PAD]""", """[UNK]"""]
A_ = [json.loads(UpperCAmelCase__ ) for line in open(UpperCAmelCase__ )]
A_ = {}
for entry in data:
A_ = entry["""id"""]
for entity_name, language in entry["entities"]:
if entity_name in SPECIAL_TOKENS:
A_ = entity_id
break
A_ = F'''{language}:{entity_name}'''
A_ = entity_id
return new_mapping
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''--checkpoint_path''', type=str, help='''Path to a pytorch_model.bin file.''')
parser.add_argument(
'''--metadata_path''', default=None, type=str, help='''Path to a metadata.json file, defining the configuration.'''
)
parser.add_argument(
'''--entity_vocab_path''',
default=None,
type=str,
help='''Path to an entity_vocab.tsv file, containing the entity vocabulary.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to where to dump the output PyTorch model.'''
)
parser.add_argument(
'''--model_size''', default='''base''', type=str, choices=['''base''', '''large'''], help='''Size of the model to be converted.'''
)
    args = parser.parse_args()
convert_luke_checkpoint(
args.checkpoint_path,
args.metadata_path,
args.entity_vocab_path,
args.pytorch_dump_folder_path,
args.model_size,
)
| 667 | 1 |
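# The conversion above grows the word-embedding matrix when the new special
# tokens "<ent>" and "<ent2>" are added. A self-contained sketch of that
# resize step, with a toy embedding in place of the real checkpoint:
import torch

word_emb = torch.randn(100, 16)            # stand-in for (vocab_size, hidden)
ent_init_index, enta_init_index = 10, 11   # ids of "@" and "#" in the real script

ent_emb = word_emb[ent_init_index].unsqueeze(0)
enta_emb = word_emb[enta_init_index].unsqueeze(0)
word_emb = torch.cat([word_emb, ent_emb, enta_emb])  # vocabulary grows by 2
print(word_emb.shape)  # torch.Size([102, 16])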
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowerCamelCase = logging.get_logger(__name__)
__lowerCamelCase = {
'''alibaba-damo/mgp-str-base''': '''https://huggingface.co/alibaba-damo/mgp-str-base/resolve/main/config.json''',
}
class A__ ( _snake_case ):
lowercase = "mgp-str"
def __init__( self , UpperCamelCase__=[32, 128] , UpperCamelCase__=4 , UpperCamelCase__=3 , UpperCamelCase__=27 , UpperCamelCase__=38 , UpperCamelCase__=50257 , UpperCamelCase__=30522 , UpperCamelCase__=768 , UpperCamelCase__=12 , UpperCamelCase__=12 , UpperCamelCase__=4.0 , UpperCamelCase__=True , UpperCamelCase__=False , UpperCamelCase__=1e-5 , UpperCamelCase__=0.0 , UpperCamelCase__=0.0 , UpperCamelCase__=0.0 , UpperCamelCase__=False , UpperCamelCase__=0.02 , **UpperCamelCase__ , ) -> Dict:
'''simple docstring'''
super().__init__(**UpperCamelCase__ )
A_ = image_size
A_ = patch_size
A_ = num_channels
A_ = max_token_length
A_ = num_character_labels
A_ = num_bpe_labels
A_ = num_wordpiece_labels
A_ = hidden_size
A_ = num_hidden_layers
A_ = num_attention_heads
A_ = mlp_ratio
A_ = distilled
A_ = layer_norm_eps
A_ = drop_rate
A_ = qkv_bias
A_ = attn_drop_rate
A_ = drop_path_rate
A_ = output_aa_attentions
A_ = initializer_range
| 667 |
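# A hedged usage sketch for the config above; assumes a transformers release
# that ships MGP-STR (v4.27+). The values shown are the defaults from __init__:
from transformers import MgpstrConfig

config = MgpstrConfig(image_size=[32, 128], patch_size=4, max_token_length=27)
print(config.model_type, config.hidden_size)  # mgp-str 768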
'''simple docstring'''
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class A__ ( _snake_case ):
lowercase = "ClapFeatureExtractor"
lowercase = ("RobertaTokenizer", "RobertaTokenizerFast")
def __init__( self , UpperCamelCase__ , UpperCamelCase__ ) -> Tuple:
'''simple docstring'''
super().__init__(UpperCamelCase__ , UpperCamelCase__ )
def __call__( self , UpperCamelCase__=None , UpperCamelCase__=None , UpperCamelCase__=None , **UpperCamelCase__ ) -> Tuple:
'''simple docstring'''
A_ = kwargs.pop("""sampling_rate""" , UpperCamelCase__ )
if text is None and audios is None:
raise ValueError("""You have to specify either text or audios. Both cannot be none.""" )
if text is not None:
A_ = self.tokenizer(UpperCamelCase__ , return_tensors=UpperCamelCase__ , **UpperCamelCase__ )
if audios is not None:
A_ = self.feature_extractor(
UpperCamelCase__ , sampling_rate=UpperCamelCase__ , return_tensors=UpperCamelCase__ , **UpperCamelCase__ )
if text is not None and audios is not None:
A_ = audio_features.input_features
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**UpperCamelCase__ ) , tensor_type=UpperCamelCase__ )
def snake_case_ ( self , *UpperCamelCase__ , **UpperCamelCase__ ) -> Optional[int]:
'''simple docstring'''
return self.tokenizer.batch_decode(*UpperCamelCase__ , **UpperCamelCase__ )
def snake_case_ ( self , *UpperCamelCase__ , **UpperCamelCase__ ) -> str:
'''simple docstring'''
return self.tokenizer.decode(*UpperCamelCase__ , **UpperCamelCase__ )
@property
def snake_case_ ( self ) -> int:
'''simple docstring'''
A_ = self.tokenizer.model_input_names
A_ = self.feature_extractor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + feature_extractor_input_names ) )
| 667 | 1 |
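# The processor above is a dispatch pattern: route text to a tokenizer, audio
# to a feature extractor, and merge the outputs when both are given. The same
# idea in miniature, with illustrative names and toy sub-processors:
class PairProcessor:
    def __init__(self, tokenizer, feature_extractor):
        self.tokenizer = tokenizer
        self.feature_extractor = feature_extractor

    def __call__(self, text=None, audios=None, **kwargs):
        if text is None and audios is None:
            raise ValueError("You have to specify either text or audios.")
        out = {}
        if text is not None:
            out.update(self.tokenizer(text, **kwargs))
        if audios is not None:
            out.update(self.feature_extractor(audios, **kwargs))
        return out

proc = PairProcessor(lambda t, **k: {"input_ids": [[1, 2]]},
                     lambda a, **k: {"input_features": [[0.0]]})
print(proc(text=["hi"], audios=[0.0]))  # {'input_ids': [[1, 2]], 'input_features': [[0.0]]}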
'''simple docstring'''
import json
import os
from typing import Optional
import numpy as np
from ...feature_extraction_utils import BatchFeature
from ...processing_utils import ProcessorMixin
from ...utils import logging
from ...utils.hub import get_file_from_repo
from ..auto import AutoTokenizer
logger = logging.get_logger(__name__)
class A__ ( _snake_case ):
lowercase = "AutoTokenizer"
lowercase = ["tokenizer"]
lowercase = {
"semantic_prompt": 1,
"coarse_prompt": 2,
"fine_prompt": 2,
}
def __init__( self , UpperCamelCase__ , UpperCamelCase__=None ) -> Tuple:
'''simple docstring'''
super().__init__(UpperCamelCase__ )
A_ = speaker_embeddings
@classmethod
def snake_case_ ( cls , UpperCamelCase__ , UpperCamelCase__="speaker_embeddings_path.json" , **UpperCamelCase__ ) -> Optional[Any]:
'''simple docstring'''
if speaker_embeddings_dict_path is not None:
A_ = get_file_from_repo(
UpperCamelCase__ , UpperCamelCase__ , subfolder=kwargs.pop("""subfolder""" , UpperCamelCase__ ) , cache_dir=kwargs.pop("""cache_dir""" , UpperCamelCase__ ) , force_download=kwargs.pop("""force_download""" , UpperCamelCase__ ) , proxies=kwargs.pop("""proxies""" , UpperCamelCase__ ) , resume_download=kwargs.pop("""resume_download""" , UpperCamelCase__ ) , local_files_only=kwargs.pop("""local_files_only""" , UpperCamelCase__ ) , use_auth_token=kwargs.pop("""use_auth_token""" , UpperCamelCase__ ) , revision=kwargs.pop("""revision""" , UpperCamelCase__ ) , )
if speaker_embeddings_path is None:
logger.warning(
                f'''`{os.path.join(UpperCamelCase__ , UpperCamelCase__ )}` does not exist
                    , no preloaded speaker embeddings will be used - Make sure to provide a correct path to the json
                    dictionary if wanted, otherwise set `speaker_embeddings_dict_path=None`.''' )
A_ = None
else:
with open(UpperCamelCase__ ) as speaker_embeddings_json:
A_ = json.load(UpperCamelCase__ )
else:
A_ = None
A_ = AutoTokenizer.from_pretrained(UpperCamelCase__ , **UpperCamelCase__ )
return cls(tokenizer=UpperCamelCase__ , speaker_embeddings=UpperCamelCase__ )
def snake_case_ ( self , UpperCamelCase__ , UpperCamelCase__="speaker_embeddings_path.json" , UpperCamelCase__="speaker_embeddings" , UpperCamelCase__ = False , **UpperCamelCase__ , ) -> Optional[int]:
'''simple docstring'''
if self.speaker_embeddings is not None:
os.makedirs(os.path.join(UpperCamelCase__ , UpperCamelCase__ , """v2""" ) , exist_ok=UpperCamelCase__ )
A_ = {}
A_ = save_directory
for prompt_key in self.speaker_embeddings:
if prompt_key != "repo_or_path":
A_ = self._load_voice_preset(UpperCamelCase__ )
A_ = {}
for key in self.speaker_embeddings[prompt_key]:
np.save(
os.path.join(
embeddings_dict["""repo_or_path"""] , UpperCamelCase__ , f'''{prompt_key}_{key}''' ) , voice_preset[key] , allow_pickle=UpperCamelCase__ , )
A_ = os.path.join(UpperCamelCase__ , f'''{prompt_key}_{key}.npy''' )
A_ = tmp_dict
with open(os.path.join(UpperCamelCase__ , UpperCamelCase__ ) , """w""" ) as fp:
json.dump(UpperCamelCase__ , UpperCamelCase__ )
super().save_pretrained(UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__ )
def snake_case_ ( self , UpperCamelCase__ = None , **UpperCamelCase__ ) -> Any:
'''simple docstring'''
A_ = self.speaker_embeddings[voice_preset]
A_ = {}
for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]:
if key not in voice_preset_paths:
raise ValueError(
f'''Voice preset unrecognized, missing {key} as a key in self.speaker_embeddings[{voice_preset}].''' )
A_ = get_file_from_repo(
self.speaker_embeddings.get("""repo_or_path""" , """/""" ) , voice_preset_paths[key] , subfolder=kwargs.pop("""subfolder""" , UpperCamelCase__ ) , cache_dir=kwargs.pop("""cache_dir""" , UpperCamelCase__ ) , force_download=kwargs.pop("""force_download""" , UpperCamelCase__ ) , proxies=kwargs.pop("""proxies""" , UpperCamelCase__ ) , resume_download=kwargs.pop("""resume_download""" , UpperCamelCase__ ) , local_files_only=kwargs.pop("""local_files_only""" , UpperCamelCase__ ) , use_auth_token=kwargs.pop("""use_auth_token""" , UpperCamelCase__ ) , revision=kwargs.pop("""revision""" , UpperCamelCase__ ) , )
if path is None:
raise ValueError(
                f'''`{os.path.join(self.speaker_embeddings.get("repo_or_path" , "/" ) , voice_preset_paths[key] )}` does not exist
, no preloaded voice preset will be used - Make sure to provide correct paths to the {voice_preset}
embeddings.''' )
A_ = np.load(UpperCamelCase__ )
return voice_preset_dict
def snake_case_ ( self , UpperCamelCase__ = None ) -> int:
'''simple docstring'''
for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]:
if key not in voice_preset:
raise ValueError(f'''Voice preset unrecognized, missing {key} as a key.''' )
if not isinstance(voice_preset[key] , np.ndarray ):
raise ValueError(f'''{key} voice preset must be a {str(self.preset_shape[key] )}D ndarray.''' )
if len(voice_preset[key].shape ) != self.preset_shape[key]:
raise ValueError(f'''{key} voice preset must be a {str(self.preset_shape[key] )}D ndarray.''' )
def __call__( self , UpperCamelCase__=None , UpperCamelCase__=None , UpperCamelCase__="pt" , UpperCamelCase__=256 , UpperCamelCase__=False , UpperCamelCase__=True , UpperCamelCase__=False , **UpperCamelCase__ , ) -> List[Any]:
'''simple docstring'''
if voice_preset is not None and not isinstance(UpperCamelCase__ , UpperCamelCase__ ):
if (
isinstance(UpperCamelCase__ , UpperCamelCase__ )
and self.speaker_embeddings is not None
and voice_preset in self.speaker_embeddings
):
A_ = self._load_voice_preset(UpperCamelCase__ )
else:
if isinstance(UpperCamelCase__ , UpperCamelCase__ ) and not voice_preset.endswith(""".npz""" ):
A_ = voice_preset + """.npz"""
A_ = np.load(UpperCamelCase__ )
if voice_preset is not None:
self._validate_voice_preset_dict(UpperCamelCase__ , **UpperCamelCase__ )
A_ = BatchFeature(data=UpperCamelCase__ , tensor_type=UpperCamelCase__ )
A_ = self.tokenizer(
UpperCamelCase__ , return_tensors=UpperCamelCase__ , padding="""max_length""" , max_length=UpperCamelCase__ , return_attention_mask=UpperCamelCase__ , return_token_type_ids=UpperCamelCase__ , add_special_tokens=UpperCamelCase__ , **UpperCamelCase__ , )
if voice_preset is not None:
A_ = voice_preset
return encoded_text
| 667 |
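# _validate_voice_preset_dict above checks that each prompt array exists and
# has the expected rank. A standalone sketch of the same check with toy data
# (helper name and array shapes are illustrative):
import numpy as np

PRESET_SHAPE = {"semantic_prompt": 1, "coarse_prompt": 2, "fine_prompt": 2}

def validate(voice_preset: dict) -> None:
    for key, ndim in PRESET_SHAPE.items():
        if key not in voice_preset:
            raise ValueError(f"Voice preset unrecognized, missing {key} as a key.")
        if not isinstance(voice_preset[key], np.ndarray) or voice_preset[key].ndim != ndim:
            raise ValueError(f"{key} voice preset must be a {ndim}D ndarray.")

validate({"semantic_prompt": np.zeros(10),
          "coarse_prompt": np.zeros((2, 10)),
          "fine_prompt": np.zeros((8, 10))})  # passes silently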
'''simple docstring'''
import numpy as np
from cva import COLOR_BGR2GRAY, cvtColor, imread
from numpy import array, uinta
from PIL import Image
from digital_image_processing import change_contrast as cc
from digital_image_processing import convert_to_negative as cn
from digital_image_processing import sepia as sp
from digital_image_processing.dithering import burkes as bs
from digital_image_processing.edge_detection import canny
from digital_image_processing.filters import convolve as conv
from digital_image_processing.filters import gaussian_filter as gg
from digital_image_processing.filters import local_binary_pattern as lbp
from digital_image_processing.filters import median_filter as med
from digital_image_processing.filters import sobel_filter as sob
from digital_image_processing.resize import resize as rs
img = imread(r'''digital_image_processing/image_data/lena_small.jpg''')
__lowerCamelCase = cvtColor(img, COLOR_BGR2GRAY)
def UpperCAmelCase__ ( ) -> Dict:
A_ = cn.convert_to_negative(UpperCAmelCase__ )
# assert negative_img array for at least one True
assert negative_img.any()
def UpperCAmelCase__ ( ) -> List[Any]:
with Image.open("""digital_image_processing/image_data/lena_small.jpg""" ) as img:
# Work around assertion for response
assert str(cc.change_contrast(UpperCAmelCase__, 1_10 ) ).startswith(
"""<PIL.Image.Image image mode=RGB size=100x100 at""" )
def UpperCAmelCase__ ( ) -> str:
A_ = canny.gen_gaussian_kernel(9, sigma=1.4 )
# Assert ambiguous array
assert resp.all()
def UpperCAmelCase__ ( ) -> Union[str, Any]:
A_ = imread("""digital_image_processing/image_data/lena_small.jpg""", 0 )
# assert ambiguous array for all == True
assert canny_img.all()
A_ = canny.canny(UpperCAmelCase__ )
# assert canny array for at least one True
assert canny_array.any()
def UpperCAmelCase__ ( ) -> Dict:
assert gg.gaussian_filter(UpperCAmelCase__, 5, sigma=0.9 ).all()
def UpperCAmelCase__ ( ) -> int:
# laplace diagonals
A_ = array([[0.25, 0.5, 0.25], [0.5, -3, 0.5], [0.25, 0.5, 0.25]] )
A_ = conv.img_convolve(UpperCAmelCase__, UpperCAmelCase__ ).astype(UpperCAmelCase__ )
assert res.any()
def UpperCAmelCase__ ( ) -> List[Any]:
assert med.median_filter(UpperCAmelCase__, 3 ).any()
def UpperCAmelCase__ ( ) -> List[Any]:
A_ , A_ = sob.sobel_filter(UpperCAmelCase__ )
assert grad.any() and theta.any()
def UpperCAmelCase__ ( ) -> List[str]:
A_ = sp.make_sepia(UpperCAmelCase__, 20 )
assert sepia.all()
def UpperCAmelCase__ ( UpperCAmelCase__ = "digital_image_processing/image_data/lena_small.jpg" ) -> List[Any]:
A_ = bs.Burkes(imread(UpperCAmelCase__, 1 ), 1_20 )
burkes.process()
assert burkes.output_img.any()
def UpperCAmelCase__ ( UpperCAmelCase__ = "digital_image_processing/image_data/lena_small.jpg", ) -> Optional[int]:
A_ = rs.NearestNeighbour(imread(UpperCAmelCase__, 1 ), 4_00, 2_00 )
nn.process()
assert nn.output.any()
def UpperCAmelCase__ ( ) -> Optional[int]:
A_ = """digital_image_processing/image_data/lena.jpg"""
# Reading the image and converting it to grayscale.
A_ = imread(UpperCAmelCase__, 0 )
# Test for get_neighbors_pixel function() return not None
A_ = 0
A_ = 0
A_ = image[x_coordinate][y_coordinate]
A_ = lbp.get_neighbors_pixel(
UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__ )
assert neighbors_pixels is not None
# Test for local_binary_pattern function()
# Create a numpy array as the same height and width of read image
A_ = np.zeros((image.shape[0], image.shape[1]) )
# Iterating through the image and calculating the local binary pattern value
# for each pixel.
for i in range(0, image.shape[0] ):
for j in range(0, image.shape[1] ):
A_ = lbp.local_binary_value(UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__ )
assert lbp_image.any()
| 667 | 1 |
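# The LBP test above compares each pixel with its 8 neighbours and packs the
# comparisons into a byte. A self-contained sketch on a tiny array; the bit
# ordering and the >= / > convention may differ from the module under test:
import numpy as np

def lbp_value(img: np.ndarray, x: int, y: int) -> int:
    center = img[x, y]
    offsets = [(-1, -1), (-1, 0), (-1, 1), (0, 1), (1, 1), (1, 0), (1, -1), (0, -1)]
    bits = 0
    for k, (dx, dy) in enumerate(offsets):
        nx, ny = x + dx, y + dy
        if 0 <= nx < img.shape[0] and 0 <= ny < img.shape[1] and img[nx, ny] >= center:
            bits |= 1 << k
    return bits

img = np.array([[10, 20, 30], [40, 50, 60], [70, 80, 90]], dtype=np.uint8)
print(lbp_value(img, 1, 1))  # 120: the four neighbours >= 50 set bits 3-6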
'''simple docstring'''
import pickle
import numpy as np
from matplotlib import pyplot as plt
class CNN :
def __init__( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__=0.2 , UpperCamelCase__=0.2 ) -> Dict:
'''simple docstring'''
A_ = bp_numa
A_ = bp_numa
A_ = bp_numa
A_ = conva_get[:2]
A_ = conva_get[2]
A_ = size_pa
A_ = rate_w
A_ = rate_t
A_ = [
np.mat(-1 * np.random.rand(self.conva[0] , self.conva[0] ) + 0.5 )
for i in range(self.conva[1] )
]
A_ = np.mat(-1 * np.random.rand(self.num_bpa , self.num_bpa ) + 0.5 )
A_ = np.mat(-1 * np.random.rand(self.num_bpa , self.num_bpa ) + 0.5 )
A_ = -2 * np.random.rand(self.conva[1] ) + 1
A_ = -2 * np.random.rand(self.num_bpa ) + 1
A_ = -2 * np.random.rand(self.num_bpa ) + 1
def snake_case_ ( self , UpperCamelCase__ ) -> List[Any]:
'''simple docstring'''
# save model dict with pickle
A_ = {
"""num_bp1""": self.num_bpa,
"""num_bp2""": self.num_bpa,
"""num_bp3""": self.num_bpa,
"""conv1""": self.conva,
"""step_conv1""": self.step_conva,
"""size_pooling1""": self.size_poolinga,
"""rate_weight""": self.rate_weight,
"""rate_thre""": self.rate_thre,
"""w_conv1""": self.w_conva,
"""wkj""": self.wkj,
"""vji""": self.vji,
"""thre_conv1""": self.thre_conva,
"""thre_bp2""": self.thre_bpa,
"""thre_bp3""": self.thre_bpa,
}
with open(UpperCamelCase__ , """wb""" ) as f:
pickle.dump(UpperCamelCase__ , UpperCamelCase__ )
print(f'''Model saved: {save_path}''' )
@classmethod
def snake_case_ ( cls , UpperCamelCase__ ) -> int:
'''simple docstring'''
# read saved model
with open(UpperCamelCase__ , """rb""" ) as f:
A_ = pickle.load(UpperCamelCase__ ) # noqa: S301
A_ = model_dic.get("""conv1""" )
conv_get.append(model_dic.get("""step_conv1""" ) )
A_ = model_dic.get("""size_pooling1""" )
A_ = model_dic.get("""num_bp1""" )
A_ = model_dic.get("""num_bp2""" )
A_ = model_dic.get("""num_bp3""" )
A_ = model_dic.get("""rate_weight""" )
A_ = model_dic.get("""rate_thre""" )
# create model instance
A_ = CNN(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
# modify model parameter
A_ = model_dic.get("""w_conv1""" )
A_ = model_dic.get("""wkj""" )
A_ = model_dic.get("""vji""" )
A_ = model_dic.get("""thre_conv1""" )
A_ = model_dic.get("""thre_bp2""" )
A_ = model_dic.get("""thre_bp3""" )
return conv_ins
def snake_case_ ( self , UpperCamelCase__ ) -> int:
'''simple docstring'''
return 1 / (1 + np.exp(-1 * x ))
def snake_case_ ( self , UpperCamelCase__ ) -> int:
'''simple docstring'''
return round(UpperCamelCase__ , 3 )
def snake_case_ ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) -> List[str]:
'''simple docstring'''
# convolution process
A_ = convs[0]
A_ = convs[1]
A_ = np.shape(UpperCamelCase__ )[0]
# get the data slice of original image data, data_focus
A_ = []
for i_focus in range(0 , size_data - size_conv + 1 , UpperCamelCase__ ):
for j_focus in range(0 , size_data - size_conv + 1 , UpperCamelCase__ ):
A_ = data[
i_focus : i_focus + size_conv, j_focus : j_focus + size_conv
]
data_focus.append(UpperCamelCase__ )
# calculate the feature map of every single kernel, and saved as list of matrix
A_ = []
A_ = int((size_data - size_conv) / conv_step + 1 )
for i_map in range(UpperCamelCase__ ):
A_ = []
for i_focus in range(len(UpperCamelCase__ ) ):
A_ = (
np.sum(np.multiply(data_focus[i_focus] , w_convs[i_map] ) )
- thre_convs[i_map]
)
featuremap.append(self.sig(UpperCamelCase__ ) )
A_ = np.asmatrix(UpperCamelCase__ ).reshape(
UpperCamelCase__ , UpperCamelCase__ )
data_featuremap.append(UpperCamelCase__ )
        # expanding the data slice to one dimension
A_ = []
for each_focus in data_focus:
focusa_list.extend(self.Expand_Mat(UpperCamelCase__ ) )
A_ = np.asarray(UpperCamelCase__ )
return focus_list, data_featuremap
def snake_case_ ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__="average_pool" ) -> List[Any]:
'''simple docstring'''
# pooling process
A_ = len(featuremaps[0] )
A_ = int(size_map / size_pooling )
A_ = []
for i_map in range(len(UpperCamelCase__ ) ):
A_ = featuremaps[i_map]
A_ = []
for i_focus in range(0 , UpperCamelCase__ , UpperCamelCase__ ):
for j_focus in range(0 , UpperCamelCase__ , UpperCamelCase__ ):
A_ = feature_map[
i_focus : i_focus + size_pooling,
j_focus : j_focus + size_pooling,
]
if pooling_type == "average_pool":
# average pooling
map_pooled.append(np.average(UpperCamelCase__ ) )
elif pooling_type == "max_pooling":
# max pooling
map_pooled.append(np.max(UpperCamelCase__ ) )
A_ = np.asmatrix(UpperCamelCase__ ).reshape(UpperCamelCase__ , UpperCamelCase__ )
featuremap_pooled.append(UpperCamelCase__ )
return featuremap_pooled
def snake_case_ ( self , UpperCamelCase__ ) -> int:
'''simple docstring'''
        # expanding three-dimensional data to a one-dimensional list
A_ = []
for i in range(len(UpperCamelCase__ ) ):
A_ = np.shape(data[i] )
A_ = data[i].reshape(1 , shapes[0] * shapes[1] )
A_ = data_listed.getA().tolist()[0]
data_expanded.extend(UpperCamelCase__ )
A_ = np.asarray(UpperCamelCase__ )
return data_expanded
def snake_case_ ( self , UpperCamelCase__ ) -> Tuple:
'''simple docstring'''
        # expanding a matrix to a one-dimensional list
A_ = np.asarray(UpperCamelCase__ )
A_ = np.shape(UpperCamelCase__ )
A_ = data_mat.reshape(1 , shapes[0] * shapes[1] )
return data_expanded
def snake_case_ ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) -> str:
'''simple docstring'''
A_ = []
A_ = 0
for i_map in range(UpperCamelCase__ ):
A_ = np.ones((size_map, size_map) )
for i in range(0 , UpperCamelCase__ , UpperCamelCase__ ):
for j in range(0 , UpperCamelCase__ , UpperCamelCase__ ):
A_ = pd_pool[
i_pool
]
A_ = i_pool + 1
A_ = np.multiply(
UpperCamelCase__ , np.multiply(out_map[i_map] , (1 - out_map[i_map]) ) )
pd_all.append(UpperCamelCase__ )
return pd_all
def snake_case_ ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__=bool ) -> List[Any]:
'''simple docstring'''
        # model training
print("""----------------------Start Training-------------------------""" )
print((""" - - Shape: Train_Data """, np.shape(UpperCamelCase__ )) )
print((""" - - Shape: Teach_Data """, np.shape(UpperCamelCase__ )) )
A_ = 0
A_ = []
A_ = 10000
while rp < n_repeat and mse >= error_accuracy:
A_ = 0
print(f'''-------------Learning Time {rp}--------------''' )
for p in range(len(UpperCamelCase__ ) ):
# print('------------Learning Image: %d--------------'%p)
A_ = np.asmatrix(datas_train[p] )
A_ = np.asarray(datas_teach[p] )
A_ , A_ = self.convolute(
UpperCamelCase__ , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , )
A_ = self.pooling(UpperCamelCase__ , self.size_poolinga )
A_ = np.shape(UpperCamelCase__ )
A_ = self._expand(UpperCamelCase__ )
A_ = data_bp_input
A_ = np.dot(UpperCamelCase__ , self.vji.T ) - self.thre_bpa
A_ = self.sig(UpperCamelCase__ )
A_ = np.dot(UpperCamelCase__ , self.wkj.T ) - self.thre_bpa
A_ = self.sig(UpperCamelCase__ )
# --------------Model Leaning ------------------------
# calculate error and gradient---------------
A_ = np.multiply(
(data_teach - bp_outa) , np.multiply(UpperCamelCase__ , (1 - bp_outa) ) )
A_ = np.multiply(
np.dot(UpperCamelCase__ , self.wkj ) , np.multiply(UpperCamelCase__ , (1 - bp_outa) ) )
A_ = np.dot(UpperCamelCase__ , self.vji )
A_ = pd_i_all / (self.size_poolinga * self.size_poolinga)
A_ = pd_conva_pooled.T.getA().tolist()
A_ = self._calculate_gradient_from_pool(
UpperCamelCase__ , UpperCamelCase__ , shape_featuremapa[0] , shape_featuremapa[1] , self.size_poolinga , )
# weight and threshold learning process---------
# convolution layer
for k_conv in range(self.conva[1] ):
A_ = self._expand_mat(pd_conva_all[k_conv] )
A_ = self.rate_weight * np.dot(UpperCamelCase__ , UpperCamelCase__ )
A_ = self.w_conva[k_conv] + delta_w.reshape(
(self.conva[0], self.conva[0]) )
A_ = (
self.thre_conva[k_conv]
- np.sum(pd_conva_all[k_conv] ) * self.rate_thre
)
# all connected layer
A_ = self.wkj + pd_k_all.T * bp_outa * self.rate_weight
A_ = self.vji + pd_j_all.T * bp_outa * self.rate_weight
A_ = self.thre_bpa - pd_k_all * self.rate_thre
A_ = self.thre_bpa - pd_j_all * self.rate_thre
# calculate the sum error of all single image
A_ = np.sum(abs(data_teach - bp_outa ) )
error_count += errors
# print(' ----Teach ',data_teach)
# print(' ----BP_output ',bp_out3)
A_ = rp + 1
A_ = error_count / patterns
all_mse.append(UpperCamelCase__ )
def draw_error():
A_ = [error_accuracy for i in range(int(n_repeat * 1.2 ) )]
plt.plot(UpperCamelCase__ , """+-""" )
plt.plot(UpperCamelCase__ , """r--""" )
plt.xlabel("""Learning Times""" )
plt.ylabel("""All_mse""" )
plt.grid(UpperCamelCase__ , alpha=0.5 )
plt.show()
        print("""------------------Training Completed---------------------""" )
print((""" - - Training epoch: """, rp, f''' - - Mse: {mse:.6f}''') )
if draw_e:
draw_error()
return mse
def snake_case_ ( self , UpperCamelCase__ ) -> Tuple:
'''simple docstring'''
# model predict
A_ = []
print("""-------------------Start Testing-------------------------""" )
print((""" - - Shape: Test_Data """, np.shape(UpperCamelCase__ )) )
for p in range(len(UpperCamelCase__ ) ):
A_ = np.asmatrix(datas_test[p] )
A_ , A_ = self.convolute(
UpperCamelCase__ , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , )
A_ = self.pooling(UpperCamelCase__ , self.size_poolinga )
A_ = self._expand(UpperCamelCase__ )
A_ = data_bp_input
A_ = bp_outa * self.vji.T - self.thre_bpa
A_ = self.sig(UpperCamelCase__ )
A_ = bp_outa * self.wkj.T - self.thre_bpa
A_ = self.sig(UpperCamelCase__ )
produce_out.extend(bp_outa.getA().tolist() )
A_ = [list(map(self.do_round , UpperCamelCase__ ) ) for each in produce_out]
return np.asarray(UpperCamelCase__ )
def snake_case_ ( self , UpperCamelCase__ ) -> Any:
'''simple docstring'''
# return the data of image after convoluting process so we can check it out
A_ = np.asmatrix(UpperCamelCase__ )
A_ , A_ = self.convolute(
UpperCamelCase__ , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , )
A_ = self.pooling(UpperCamelCase__ , self.size_poolinga )
return data_conveda, data_pooleda
if __name__ == "__main__":
pass
| 667 |
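# convolute() above slides a kernel with a given stride, subtracts a threshold
# and applies a sigmoid. Here is the same "valid" convolution written directly
# with numpy (a sketch of the maths, not the class's exact API):
import numpy as np

def sigmoid(x):
    return 1.0 / (1.0 + np.exp(-x))

def conv_valid(data: np.ndarray, kernel: np.ndarray, stride: int, thre: float) -> np.ndarray:
    k = kernel.shape[0]
    out = (data.shape[0] - k) // stride + 1
    fmap = np.empty((out, out))
    for i in range(out):
        for j in range(out):
            patch = data[i * stride : i * stride + k, j * stride : j * stride + k]
            fmap[i, j] = sigmoid(np.sum(patch * kernel) - thre)
    return fmap

data = np.arange(16, dtype=float).reshape(4, 4)
print(conv_valid(data, np.ones((2, 2)) / 4, stride=2, thre=0.0).shape)  # (2, 2)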
'''simple docstring'''
def UpperCAmelCase__ ( UpperCAmelCase__, UpperCAmelCase__ ) -> float:
_validate_point(UpperCAmelCase__ )
_validate_point(UpperCAmelCase__ )
if len(UpperCAmelCase__ ) != len(UpperCAmelCase__ ):
raise ValueError("""Both points must be in the same n-dimensional space""" )
return float(sum(abs(a - b ) for a, b in zip(UpperCAmelCase__, UpperCAmelCase__ ) ) )
def _validate_point ( UpperCAmelCase__ ) -> None:
if point:
if isinstance(UpperCAmelCase__, UpperCAmelCase__ ):
for item in point:
if not isinstance(UpperCAmelCase__, (int, float) ):
A_ = (
"""Expected a list of numbers as input, found """
F'''{type(UpperCAmelCase__ ).__name__}'''
)
raise TypeError(UpperCAmelCase__ )
else:
A_ = F'''Expected a list of numbers as input, found {type(UpperCAmelCase__ ).__name__}'''
raise TypeError(UpperCAmelCase__ )
else:
raise ValueError("""Missing an input""" )
def UpperCAmelCase__ ( UpperCAmelCase__, UpperCAmelCase__ ) -> float:
_validate_point(UpperCAmelCase__ )
_validate_point(UpperCAmelCase__ )
if len(UpperCAmelCase__ ) != len(UpperCAmelCase__ ):
raise ValueError("""Both points must be in the same n-dimensional space""" )
return float(sum(abs(x - y ) for x, y in zip(UpperCAmelCase__, UpperCAmelCase__ ) ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 667 | 1 |
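# A named restatement of the distance computed above (both obfuscated
# functions implement the same sum of absolute coordinate differences):
def manhattan_distance(a: list, b: list) -> float:
    if len(a) != len(b):
        raise ValueError("Both points must be in the same n-dimensional space")
    return float(sum(abs(x - y) for x, y in zip(a, b)))

print(manhattan_distance([1, 1], [9, 9]))              # 16.0
print(manhattan_distance([1.5, -3, 250], [1, -3, 0]))  # 250.5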
'''simple docstring'''
import warnings
from diffusers import StableDiffusionImgaImgPipeline # noqa F401
warnings.warn(
'''The `image_to_image.py` script is outdated. Please use directly `from diffusers import'''
''' StableDiffusionImg2ImgPipeline` instead.'''
)
| 667 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_beit import BeitImageProcessor
__lowerCamelCase = logging.get_logger(__name__)
class A__ ( _snake_case ):
def __init__( self , *UpperCamelCase__ , **UpperCamelCase__ ) -> None:
'''simple docstring'''
warnings.warn(
"""The class BeitFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"""
""" use BeitImageProcessor instead.""" , UpperCamelCase__ , )
super().__init__(*UpperCamelCase__ , **UpperCamelCase__ )
| 667 | 1 |
'''simple docstring'''
from typing import Dict, List, Optional
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
__lowerCamelCase = logging.get_logger(__name__)
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''nielsr/canine-s''': 2048,
}
# Unicode defines 1,114,112 total “codepoints”
UNICODE_VOCAB_SIZE = 1_114_112  # 0x110000
# Below: Constants defining canonical codepoints for special, pseudo-characters.
# Copied from https://github.com/google-research/language/blob/master/language/canine/special_codepoints.py
PAD = 0
CLS = 0Xe000
SEP = 0Xe001
BOS = 0Xe002
MASK = 0Xe003
RESERVED = 0Xe004
# Maps special codepoints to human-readable names.
SPECIAL_CODEPOINTS = {
# Special symbols are represented using codepoints values that are valid,
# but designated as "Private Use", meaning that they will never be assigned
# characters by the Unicode Consortium, and are thus safe for use here.
#
# NOTE: Do *NOT* add any sort of [UNK_CHAR] here. They are explicitly
# excluded and should fail with a hard error.
CLS: "[CLS]",
SEP: "[SEP]",
BOS: "[BOS]",
MASK: "[MASK]",
PAD: "[PAD]",
RESERVED: "[RESERVED]",
}
# Maps special codepoint human-readable names to their codepoint values.
__lowerCamelCase = {name: codepoint for codepoint, name in SPECIAL_CODEPOINTS.items()}
class A__ ( _snake_case ):
lowercase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    def __init__( self , UpperCamelCase__=chr(CLS ) , UpperCamelCase__=chr(SEP ) , UpperCamelCase__=chr(SEP ) , UpperCamelCase__=chr(CLS ) , UpperCamelCase__=chr(PAD ) , UpperCamelCase__=chr(MASK ) , UpperCamelCase__=False , UpperCamelCase__=2048 , **UpperCamelCase__ , ) -> List[Any]:
'''simple docstring'''
A_ = AddedToken(UpperCamelCase__ , lstrip=UpperCamelCase__ , rstrip=UpperCamelCase__ ) if isinstance(UpperCamelCase__ , UpperCamelCase__ ) else bos_token
A_ = AddedToken(UpperCamelCase__ , lstrip=UpperCamelCase__ , rstrip=UpperCamelCase__ ) if isinstance(UpperCamelCase__ , UpperCamelCase__ ) else eos_token
A_ = AddedToken(UpperCamelCase__ , lstrip=UpperCamelCase__ , rstrip=UpperCamelCase__ ) if isinstance(UpperCamelCase__ , UpperCamelCase__ ) else sep_token
A_ = AddedToken(UpperCamelCase__ , lstrip=UpperCamelCase__ , rstrip=UpperCamelCase__ ) if isinstance(UpperCamelCase__ , UpperCamelCase__ ) else cls_token
A_ = AddedToken(UpperCamelCase__ , lstrip=UpperCamelCase__ , rstrip=UpperCamelCase__ ) if isinstance(UpperCamelCase__ , UpperCamelCase__ ) else pad_token
        # Mask token behaves like a normal word, i.e. includes the space before it
A_ = AddedToken(UpperCamelCase__ , lstrip=UpperCamelCase__ , rstrip=UpperCamelCase__ ) if isinstance(UpperCamelCase__ , UpperCamelCase__ ) else mask_token
super().__init__(
bos_token=UpperCamelCase__ , eos_token=UpperCamelCase__ , sep_token=UpperCamelCase__ , cls_token=UpperCamelCase__ , pad_token=UpperCamelCase__ , mask_token=UpperCamelCase__ , add_prefix_space=UpperCamelCase__ , model_max_length=UpperCamelCase__ , **UpperCamelCase__ , )
# Creates a mapping for looking up the IDs of special symbols.
A_ = {}
for codepoint, name in SPECIAL_CODEPOINTS.items():
A_ = codepoint
# Creates a mapping for looking up the string forms of special symbol IDs.
A_ = {
codepoint: name for name, codepoint in self._special_codepoints.items()
}
A_ = UNICODE_VOCAB_SIZE
A_ = len(self._special_codepoints )
@property
def snake_case_ ( self ) -> int:
'''simple docstring'''
return self._unicode_vocab_size
def snake_case_ ( self , UpperCamelCase__ ) -> List[str]:
'''simple docstring'''
return list(UpperCamelCase__ )
def snake_case_ ( self , UpperCamelCase__ ) -> int:
'''simple docstring'''
try:
return ord(UpperCamelCase__ )
except TypeError:
raise ValueError(f'''invalid token: \'{token}\'''' )
def snake_case_ ( self , UpperCamelCase__ ) -> str:
'''simple docstring'''
try:
if index in SPECIAL_CODEPOINTS:
return SPECIAL_CODEPOINTS[index]
return chr(UpperCamelCase__ )
except TypeError:
raise ValueError(f'''invalid id: {index}''' )
def snake_case_ ( self , UpperCamelCase__ ) -> str:
'''simple docstring'''
return "".join(UpperCamelCase__ )
def snake_case_ ( self , UpperCamelCase__ , UpperCamelCase__ = None ) -> List[int]:
'''simple docstring'''
A_ = [self.sep_token_id]
A_ = [self.cls_token_id]
A_ = cls + token_ids_a + sep
if token_ids_a is not None:
result += token_ids_a + sep
return result
def snake_case_ ( self , UpperCamelCase__ , UpperCamelCase__ = None , UpperCamelCase__ = False ) -> List[int]:
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=UpperCamelCase__ , token_ids_a=UpperCamelCase__ , already_has_special_tokens=UpperCamelCase__ )
A_ = [1] + ([0] * len(UpperCamelCase__ )) + [1]
if token_ids_a is not None:
result += ([0] * len(UpperCamelCase__ )) + [1]
return result
def snake_case_ ( self , UpperCamelCase__ , UpperCamelCase__ = None ) -> List[int]:
'''simple docstring'''
A_ = [self.sep_token_id]
A_ = [self.cls_token_id]
A_ = len(cls + token_ids_a + sep ) * [0]
if token_ids_a is not None:
result += len(token_ids_a + sep ) * [1]
return result
def snake_case_ ( self , UpperCamelCase__ , UpperCamelCase__ = None ) -> Tuple:
'''simple docstring'''
return ()
| 667 |
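# CANINE tokenizes at the Unicode codepoint level: converting a token to an id
# is essentially ord(), and converting back is chr(). A minimal round trip:
text = "héllo"
ids = [ord(c) for c in text]
print(ids)  # [104, 233, 108, 108, 111]
print("".join(chr(i) for i in ids) == text)  # True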
'''simple docstring'''
def UpperCAmelCase__ ( UpperCAmelCase__ ) -> bool:
if num < 0:
return False
A_ = num
A_ = 0
while num > 0:
A_ = rev_num * 10 + (num % 10)
num //= 10
return num_copy == rev_num
if __name__ == "__main__":
import doctest
doctest.testmod()
| 667 | 1 |
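# An equivalent string-based check, for comparison with the arithmetic
# digit reversal above:
def is_palindrome_str(num: int) -> bool:
    return num >= 0 and str(num) == str(num)[::-1]

print(is_palindrome_str(121), is_palindrome_str(-121), is_palindrome_str(10))  # True False False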
'''simple docstring'''
import unicodedata
from dataclasses import dataclass
from typing import Optional, Union
import numpy as np
from transformers.data.data_collator import DataCollatorMixin
from transformers.file_utils import PaddingStrategy
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
def UpperCAmelCase__ ( UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__ ) -> Dict:
if isinstance(UpperCAmelCase__, UpperCAmelCase__ ):
A_ = np.full((len(UpperCAmelCase__ ), sequence_length, 2), UpperCAmelCase__ )
else:
A_ = np.full((len(UpperCAmelCase__ ), sequence_length), UpperCAmelCase__ )
for i, tensor in enumerate(UpperCAmelCase__ ):
if padding_side == "right":
if isinstance(UpperCAmelCase__, UpperCAmelCase__ ):
A_ = tensor[:sequence_length]
else:
A_ = tensor[:sequence_length]
else:
if isinstance(UpperCAmelCase__, UpperCAmelCase__ ):
A_ = tensor[:sequence_length]
else:
A_ = tensor[:sequence_length]
return out_tensor.tolist()
def UpperCAmelCase__ ( UpperCAmelCase__ ) -> Optional[Any]:
A_ = ord(UpperCAmelCase__ )
if (cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or (cp >= 91 and cp <= 96) or (cp >= 1_23 and cp <= 1_26):
return True
A_ = unicodedata.category(UpperCAmelCase__ )
if cat.startswith("""P""" ):
return True
return False
@dataclass
class A__ ( _snake_case ):
lowercase = 42
lowercase = True
lowercase = None
lowercase = None
lowercase = -100
lowercase = "pt"
def snake_case_ ( self , UpperCamelCase__ ) -> Any:
'''simple docstring'''
import torch
A_ = """label""" if """label""" in features[0].keys() else """labels"""
A_ = [feature[label_name] for feature in features] if label_name in features[0].keys() else None
A_ = self.tokenizer.pad(
UpperCamelCase__ , padding=self.padding , max_length=self.max_length , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors="""pt""" if labels is None else None , )
if labels is None:
return batch
A_ = torch.tensor(batch["""entity_ids"""] ).shape[1]
A_ = self.tokenizer.padding_side
if padding_side == "right":
A_ = [
list(UpperCamelCase__ ) + [self.label_pad_token_id] * (sequence_length - len(UpperCamelCase__ )) for label in labels
]
else:
A_ = [
[self.label_pad_token_id] * (sequence_length - len(UpperCamelCase__ )) + list(UpperCamelCase__ ) for label in labels
]
A_ = [feature["""ner_tags"""] for feature in features]
A_ = padding_tensor(UpperCamelCase__ , -1 , UpperCamelCase__ , UpperCamelCase__ )
A_ = [feature["""original_entity_spans"""] for feature in features]
A_ = padding_tensor(UpperCamelCase__ , (-1, -1) , UpperCamelCase__ , UpperCamelCase__ )
A_ = {k: torch.tensor(UpperCamelCase__ , dtype=torch.intaa ) for k, v in batch.items()}
return batch
| 667 |
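# The collator above right- or left-pads label sequences to the batch's
# entity-sequence length with label_pad_token_id. The core padding idea,
# stripped of the tokenizer machinery (function name is illustrative):
def pad_labels(labels, length, pad_id=-100, side="right"):
    out = []
    for lab in labels:
        padding = [pad_id] * (length - len(lab))
        out.append(lab + padding if side == "right" else padding + lab)
    return out

print(pad_labels([[1, 2], [3]], length=4))  # [[1, 2, -100, -100], [3, -100, -100, -100]]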
'''simple docstring'''
ks = range(2, 20 + 1)
base = [10**k for k in range(ks[-1] + 1)]
memo = {}
def next_term ( UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__ ) -> Tuple:
A_ = sum(a_i[j] for j in range(UpperCAmelCase__, len(UpperCAmelCase__ ) ) )
A_ = sum(a_i[j] * base[j] for j in range(min(len(UpperCAmelCase__ ), UpperCAmelCase__ ) ) )
A_ , A_ = 0, 0
A_ = n - i
A_ = memo.get(UpperCAmelCase__ )
if sub_memo is not None:
A_ = sub_memo.get(UpperCAmelCase__ )
if jumps is not None and len(UpperCAmelCase__ ) > 0:
# find and make the largest jump without going over
A_ = -1
for _k in range(len(UpperCAmelCase__ ) - 1, -1, -1 ):
if jumps[_k][2] <= k and jumps[_k][1] <= max_dn:
A_ = _k
break
if max_jump >= 0:
A_ , A_ , A_ = jumps[max_jump]
# since the difference between jumps is cached, add c
A_ = diff + c
for j in range(min(UpperCAmelCase__, len(UpperCAmelCase__ ) ) ):
A_ , A_ = divmod(UpperCAmelCase__, 10 )
if new_c > 0:
add(UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__ )
else:
A_ = []
else:
A_ = {c: []}
A_ = sub_memo
if dn >= max_dn or c + diff >= base[k]:
return diff, dn
if k > ks[0]:
while True:
# keep doing smaller jumps
A_ , A_ = next_term(UpperCAmelCase__, k - 1, i + dn, UpperCAmelCase__ )
diff += _diff
dn += terms_jumped
if dn >= max_dn or c + diff >= base[k]:
break
else:
# would be too small a jump, just compute sequential terms instead
A_ , A_ = compute(UpperCAmelCase__, UpperCAmelCase__, i + dn, UpperCAmelCase__ )
diff += _diff
dn += terms_jumped
A_ = sub_memo[c]
# keep jumps sorted by # of terms skipped
A_ = 0
while j < len(UpperCAmelCase__ ):
if jumps[j][1] > dn:
break
j += 1
# cache the jump for this value digitsum(b) and c
sub_memo[c].insert(UpperCAmelCase__, (diff, dn, k) )
return (diff, dn)
def compute ( UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__ ) -> int:
if i >= n:
return 0, i
if k > len(UpperCAmelCase__ ):
a_i.extend([0 for _ in range(k - len(UpperCAmelCase__ ) )] )
# note: a_i -> b * 10^k + c
# ds_b -> digitsum(b)
# ds_c -> digitsum(c)
A_ = i
A_ , A_ , A_ = 0, 0, 0
for j in range(len(UpperCAmelCase__ ) ):
if j >= k:
ds_b += a_i[j]
else:
ds_c += a_i[j]
while i < n:
i += 1
A_ = ds_c + ds_b
diff += addend
A_ = 0
for j in range(UpperCAmelCase__ ):
A_ = a_i[j] + addend
A_ , A_ = divmod(UpperCAmelCase__, 10 )
ds_c += a_i[j]
if addend > 0:
break
if addend > 0:
add(UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__ )
return diff, i - start_i
def add ( UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__ ) -> str:
for j in range(UpperCAmelCase__, len(UpperCAmelCase__ ) ):
A_ = digits[j] + addend
if s >= 10:
A_ , A_ = divmod(UpperCAmelCase__, 10 )
A_ = addend // 10 + quotient
else:
A_ = s
A_ = addend // 10
if addend == 0:
break
while addend > 0:
A_ , A_ = divmod(UpperCAmelCase__, 10 )
digits.append(UpperCAmelCase__ )
def solution ( UpperCAmelCase__ = 10**15 ) -> int:
A_ = [1]
A_ = 1
A_ = 0
while True:
A_ , A_ = next_term(UpperCAmelCase__, 20, i + dn, UpperCAmelCase__ )
dn += terms_jumped
if dn == n - i:
break
A_ = 0
for j in range(len(UpperCAmelCase__ ) ):
a_n += digits[j] * 10**j
return a_n
if __name__ == "__main__":
print(f"""{solution() = }""")
| 667 | 1 |
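# A brute-force reference for the sequence above (a(1) = 1 and
# a(n+1) = a(n) + digitsum(a(n))), useful for sanity-checking the
# jump-caching solution on small n:
def brute_force(n: int) -> int:
    a = 1
    for _ in range(n - 1):
        a += sum(int(d) for d in str(a))
    return a

print([brute_force(k) for k in range(1, 7)])  # [1, 2, 4, 8, 16, 23]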
'''simple docstring'''
from collections import deque
def tarjan ( UpperCAmelCase__ ) -> Optional[Any]:
A_ = len(UpperCAmelCase__ )
A_ = deque()
A_ = [False for _ in range(UpperCAmelCase__ )]
A_ = [-1 for _ in range(UpperCAmelCase__ )]
A_ = index_of[:]
def strong_connect(UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__ ):
A_ = index # the number when this node is seen
A_ = index # lowest rank node reachable from here
index += 1
stack.append(UpperCAmelCase__ )
A_ = True
for w in g[v]:
if index_of[w] == -1:
A_ = strong_connect(UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__ )
A_ = (
lowlink_of[w] if lowlink_of[w] < lowlink_of[v] else lowlink_of[v]
)
elif on_stack[w]:
A_ = (
lowlink_of[w] if lowlink_of[w] < lowlink_of[v] else lowlink_of[v]
)
if lowlink_of[v] == index_of[v]:
A_ = []
A_ = stack.pop()
A_ = False
component.append(UpperCAmelCase__ )
while w != v:
A_ = stack.pop()
A_ = False
component.append(UpperCAmelCase__ )
components.append(UpperCAmelCase__ )
return index
A_ = []
for v in range(UpperCAmelCase__ ):
if index_of[v] == -1:
strong_connect(UpperCAmelCase__, 0, UpperCAmelCase__ )
return components
def create_graph ( UpperCAmelCase__, UpperCAmelCase__ ) -> int:
A_ = [[] for _ in range(UpperCAmelCase__ )]
for u, v in edges:
g[u].append(UpperCAmelCase__ )
return g
if __name__ == "__main__":
# Test
__lowerCamelCase = 7
__lowerCamelCase = [0, 0, 1, 2, 3, 3, 4, 4, 6]
__lowerCamelCase = [1, 3, 2, 0, 1, 4, 5, 6, 5]
__lowerCamelCase = [(u, v) for u, v in zip(source, target)]
__lowerCamelCase = create_graph(n_vertices, edges)
assert [[5], [6], [4], [3, 2, 1, 0]] == tarjan(g)
| 667 |
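# strong_connect above recurses once per vertex, so graphs deeper than
# Python's default recursion limit (~1000) raise RecursionError; raising the
# limit first is the usual workaround when reusing this implementation:
import sys
sys.setrecursionlimit(10**6)

# For the 7-vertex test graph above, the expected decomposition is
# [[5], [6], [4], [3, 2, 1, 0]]: vertices 0-3 form the only cycle and
# collapse into one component, while 4, 5 and 6 are singletons.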
'''simple docstring'''
import tensorflow as tf
from ...tf_utils import shape_list
class A__ ( tf.keras.layers.Layer ):
def __init__( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__=1 , UpperCamelCase__=False , **UpperCamelCase__ ) -> Union[str, Any]:
'''simple docstring'''
super().__init__(**UpperCamelCase__ )
A_ = vocab_size
A_ = d_embed
A_ = d_proj
A_ = cutoffs + [vocab_size]
A_ = [0] + self.cutoffs
A_ = div_val
A_ = self.cutoffs[0]
A_ = len(self.cutoffs ) - 1
A_ = self.shortlist_size + self.n_clusters
A_ = keep_order
A_ = []
A_ = []
def snake_case_ ( self , UpperCamelCase__ ) -> Union[str, Any]:
'''simple docstring'''
if self.n_clusters > 0:
A_ = self.add_weight(
shape=(self.n_clusters, self.d_embed) , initializer="""zeros""" , trainable=UpperCamelCase__ , name="""cluster_weight""" )
A_ = self.add_weight(
shape=(self.n_clusters,) , initializer="""zeros""" , trainable=UpperCamelCase__ , name="""cluster_bias""" )
if self.div_val == 1:
for i in range(len(self.cutoffs ) ):
if self.d_proj != self.d_embed:
A_ = self.add_weight(
shape=(self.d_embed, self.d_proj) , initializer="""zeros""" , trainable=UpperCamelCase__ , name=f'''out_projs_._{i}''' , )
self.out_projs.append(UpperCamelCase__ )
else:
self.out_projs.append(UpperCamelCase__ )
A_ = self.add_weight(
shape=(self.vocab_size, self.d_embed) , initializer="""zeros""" , trainable=UpperCamelCase__ , name=f'''out_layers_._{i}_._weight''' , )
A_ = self.add_weight(
shape=(self.vocab_size,) , initializer="""zeros""" , trainable=UpperCamelCase__ , name=f'''out_layers_._{i}_._bias''' , )
self.out_layers.append((weight, bias) )
else:
for i in range(len(self.cutoffs ) ):
A_ , A_ = self.cutoff_ends[i], self.cutoff_ends[i + 1]
A_ = self.d_embed // (self.div_val**i)
A_ = self.add_weight(
shape=(d_emb_i, self.d_proj) , initializer="""zeros""" , trainable=UpperCamelCase__ , name=f'''out_projs_._{i}''' )
self.out_projs.append(UpperCamelCase__ )
A_ = self.add_weight(
shape=(r_idx - l_idx, d_emb_i) , initializer="""zeros""" , trainable=UpperCamelCase__ , name=f'''out_layers_._{i}_._weight''' , )
A_ = self.add_weight(
shape=(r_idx - l_idx,) , initializer="""zeros""" , trainable=UpperCamelCase__ , name=f'''out_layers_._{i}_._bias''' , )
self.out_layers.append((weight, bias) )
super().build(UpperCamelCase__ )
@staticmethod
def snake_case_ ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__=None ) -> List[Any]:
'''simple docstring'''
A_ = x
if proj is not None:
A_ = tf.einsum("""ibd,ed->ibe""" , UpperCamelCase__ , UpperCamelCase__ )
return tf.einsum("""ibd,nd->ibn""" , UpperCamelCase__ , UpperCamelCase__ ) + b
@staticmethod
def snake_case_ ( UpperCamelCase__ , UpperCamelCase__ ) -> List[str]:
'''simple docstring'''
A_ = shape_list(UpperCamelCase__ )
A_ = tf.range(lp_size[0] , dtype=target.dtype )
A_ = tf.stack([r, target] , 1 )
return tf.gather_nd(UpperCamelCase__ , UpperCamelCase__ )
def snake_case_ ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__=True , UpperCamelCase__=False ) -> Optional[int]:
'''simple docstring'''
A_ = 0
if self.n_clusters == 0:
A_ = self._logit(UpperCamelCase__ , self.out_layers[0][0] , self.out_layers[0][1] , self.out_projs[0] )
if target is not None:
A_ = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=UpperCamelCase__ , logits=UpperCamelCase__ )
A_ = tf.nn.log_softmax(UpperCamelCase__ , axis=-1 )
else:
A_ = shape_list(UpperCamelCase__ )
A_ = []
A_ = tf.zeros(hidden_sizes[:2] )
for i in range(len(self.cutoffs ) ):
A_ , A_ = self.cutoff_ends[i], self.cutoff_ends[i + 1]
if target is not None:
A_ = (target >= l_idx) & (target < r_idx)
A_ = tf.where(UpperCamelCase__ )
A_ = tf.boolean_mask(UpperCamelCase__ , UpperCamelCase__ ) - l_idx
if self.div_val == 1:
A_ = self.out_layers[0][0][l_idx:r_idx]
A_ = self.out_layers[0][1][l_idx:r_idx]
else:
A_ = self.out_layers[i][0]
A_ = self.out_layers[i][1]
if i == 0:
A_ = tf.concat([cur_W, self.cluster_weight] , 0 )
A_ = tf.concat([cur_b, self.cluster_bias] , 0 )
A_ = self._logit(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , self.out_projs[0] )
A_ = tf.nn.log_softmax(UpperCamelCase__ )
out.append(head_logprob[..., : self.cutoffs[0]] )
if target is not None:
A_ = tf.boolean_mask(UpperCamelCase__ , UpperCamelCase__ )
A_ = self._gather_logprob(UpperCamelCase__ , UpperCamelCase__ )
else:
A_ = self._logit(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , self.out_projs[i] )
A_ = tf.nn.log_softmax(UpperCamelCase__ )
A_ = self.cutoffs[0] + i - 1 # No probability for the head cluster
A_ = head_logprob[..., cluster_prob_idx, None] + tail_logprob
out.append(UpperCamelCase__ )
if target is not None:
A_ = tf.boolean_mask(UpperCamelCase__ , UpperCamelCase__ )
A_ = tf.boolean_mask(UpperCamelCase__ , UpperCamelCase__ )
A_ = self._gather_logprob(UpperCamelCase__ , UpperCamelCase__ )
cur_logprob += cur_head_logprob[:, self.cutoff_ends[1] + i - 1]
if target is not None:
loss += tf.scatter_nd(UpperCamelCase__ , -cur_logprob , shape_list(UpperCamelCase__ ) )
A_ = tf.concat(UpperCamelCase__ , axis=-1 )
if target is not None:
if return_mean:
A_ = tf.reduce_mean(UpperCamelCase__ )
# Add the training-time loss value to the layer using `self.add_loss()`.
self.add_loss(UpperCamelCase__ )
# Log the loss as a metric (we could log arbitrary metrics,
            # including different metrics for training and inference).
self.add_metric(UpperCamelCase__ , name=self.name , aggregation="""mean""" if return_mean else """""" )
return out
| 667 | 1 |
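
The `_gather_logprob` helper in the adaptive-softmax layer above selects, for each row, the log-probability of that row's target id by pairing row indices with target ids and using `tf.gather_nd`. The same gather shown standalone, assuming TensorFlow is installed (values are illustrative):

import tensorflow as tf

logprob = tf.math.log([[0.1, 0.9], [0.8, 0.2]])  # [batch, vocab]
target = tf.constant([1, 0])                     # one target id per row
rows = tf.range(tf.shape(logprob)[0], dtype=target.dtype)
picked = tf.gather_nd(logprob, tf.stack([rows, target], axis=1))
# picked ≈ [log 0.9, log 0.8]
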
'''simple docstring'''
import os
__lowerCamelCase = {'''I''': 1, '''V''': 5, '''X''': 10, '''L''': 50, '''C''': 100, '''D''': 500, '''M''': 1000}
def UpperCAmelCase__ ( UpperCAmelCase__ ) -> int:
A_ = 0
A_ = 0
while index < len(UpperCAmelCase__ ) - 1:
A_ = SYMBOLS[numerals[index]]
A_ = SYMBOLS[numerals[index + 1]]
if current_value < next_value:
total_value -= current_value
else:
total_value += current_value
index += 1
total_value += SYMBOLS[numerals[index]]
return total_value
def UpperCAmelCase__ ( UpperCAmelCase__ ) -> str:
A_ = """"""
A_ = num // 10_00
numerals += m_count * "M"
num %= 10_00
A_ = num // 1_00
if c_count == 9:
numerals += "CM"
c_count -= 9
elif c_count == 4:
numerals += "CD"
c_count -= 4
if c_count >= 5:
numerals += "D"
c_count -= 5
numerals += c_count * "C"
num %= 1_00
A_ = num // 10
if x_count == 9:
numerals += "XC"
x_count -= 9
elif x_count == 4:
numerals += "XL"
x_count -= 4
if x_count >= 5:
numerals += "L"
x_count -= 5
numerals += x_count * "X"
num %= 10
if num == 9:
numerals += "IX"
num -= 9
elif num == 4:
numerals += "IV"
num -= 4
if num >= 5:
numerals += "V"
num -= 5
numerals += num * "I"
return numerals
def UpperCAmelCase__ ( UpperCAmelCase__ = "/p089_roman.txt" ) -> int:
A_ = 0
    with open(os.path.dirname(__file__ ) + roman_numerals_filename ) as filea:
A_ = filea.readlines()
for line in lines:
A_ = line.strip()
A_ = parse_roman_numerals(UpperCAmelCase__ )
A_ = generate_roman_numerals(UpperCAmelCase__ )
savings += len(UpperCAmelCase__ ) - len(UpperCAmelCase__ )
return savings
if __name__ == "__main__":
print(f"""{solution() = }""")
| 667 |
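
The two helpers above convert between Roman numerals and integers; the Project Euler task then counts characters saved by re-encoding each numeral minimally. A de-obfuscated parsing sketch with a sentinel that makes the subtractive rule uniform (function name is illustrative):

SYMBOLS = {"I": 1, "V": 5, "X": 10, "L": 50, "C": 100, "D": 500, "M": 1000}

def parse(numerals: str) -> int:
    total = 0
    for cur, nxt in zip(numerals, numerals[1:] + "I"):
        # subtractive pair (IV, IX, XL, ...) when a smaller symbol precedes a larger one
        total += -SYMBOLS[cur] if SYMBOLS[cur] < SYMBOLS[nxt] else SYMBOLS[cur]
    return total

assert parse("MCMXCIV") == 1994
assert parse("XIIIIII") == 16  # valid but wasteful form
# the minimal re-encoding of 16 is "XVI": 7 - 3 = 4 characters saved
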
'''simple docstring'''
from queue import PriorityQueue
from typing import Any
import numpy as np
def UpperCAmelCase__ ( UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, ) -> float | int:
for nxt, d in graph[v]:
if nxt in visited_forward:
continue
A_ = cst_fwd.get(UpperCAmelCase__, np.inf )
A_ = cst_fwd[v] + d
if new_cost_f < old_cost_f:
queue.put((new_cost_f, nxt) )
A_ = new_cost_f
A_ = v
if nxt in visited_backward:
if cst_fwd[v] + d + cst_bwd[nxt] < shortest_distance:
A_ = cst_fwd[v] + d + cst_bwd[nxt]
return shortest_distance
def UpperCAmelCase__ ( UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__ ) -> int:
A_ = -1
A_ = set()
A_ = set()
A_ = {source: 0}
A_ = {destination: 0}
A_ = {source: None}
A_ = {destination: None}
A_ = PriorityQueue()
A_ = PriorityQueue()
A_ = np.inf
queue_forward.put((0, source) )
queue_backward.put((0, destination) )
if source == destination:
return 0
while not queue_forward.empty() and not queue_backward.empty():
A_ , A_ = queue_forward.get()
visited_forward.add(UpperCAmelCase__ )
A_ , A_ = queue_backward.get()
visited_backward.add(UpperCAmelCase__ )
A_ = pass_and_relaxation(
UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, )
A_ = pass_and_relaxation(
UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, )
if cst_fwd[v_fwd] + cst_bwd[v_bwd] >= shortest_distance:
break
if shortest_distance != np.inf:
A_ = shortest_distance
return shortest_path_distance
__lowerCamelCase = {
'''B''': [['''C''', 1]],
'''C''': [['''D''', 1]],
'''D''': [['''F''', 1]],
'''E''': [['''B''', 1], ['''G''', 2]],
'''F''': [],
'''G''': [['''F''', 1]],
}
__lowerCamelCase = {
'''B''': [['''E''', 1]],
'''C''': [['''B''', 1]],
'''D''': [['''C''', 1]],
'''F''': [['''D''', 1], ['''G''', 1]],
'''E''': [[None, np.inf]],
'''G''': [['''E''', 2]],
}
if __name__ == "__main__":
import doctest
doctest.testmod()
| 667 | 1 |
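
The search above runs Dijkstra simultaneously from `source` (forward graph) and from `destination` (reversed graph), keeps the best `cst_fwd[v] + d + cst_bwd[nxt]` seen where the frontiers touch, and stops once the two freshly popped keys sum past that best value. For comparison, a plain one-directional Dijkstra on the same forward graph (helper name is illustrative):

from queue import PriorityQueue

def dijkstra(graph: dict, source: str, destination: str) -> int:
    dist = {source: 0}
    pq = PriorityQueue()
    pq.put((0, source))
    while not pq.empty():
        d, v = pq.get()
        if d > dist.get(v, float("inf")):
            continue  # stale queue entry
        if v == destination:
            return d
        for nxt, w in graph.get(v, []):
            nd = d + w
            if nd < dist.get(nxt, float("inf")):
                dist[nxt] = nd
                pq.put((nd, nxt))
    return -1

graph_fwd = {
    "B": [["C", 1]], "C": [["D", 1]], "D": [["F", 1]],
    "E": [["B", 1], ["G", 2]], "F": [], "G": [["F", 1]],
}
assert dijkstra(graph_fwd, "E", "F") == 3  # E→G→F beats E→B→C→D→F (cost 4)
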
'''simple docstring'''
__lowerCamelCase = 8.314462 # Unit - J mol-1 K-1
def UpperCAmelCase__ ( UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__ ) -> float:
if moles < 0 or kelvin < 0 or volume < 0:
raise ValueError("""Invalid inputs. Enter positive value.""" )
return moles * kelvin * UNIVERSAL_GAS_CONSTANT / volume
def UpperCAmelCase__ ( UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__ ) -> float:
if moles < 0 or kelvin < 0 or pressure < 0:
raise ValueError("""Invalid inputs. Enter positive value.""" )
return moles * kelvin * UNIVERSAL_GAS_CONSTANT / pressure
if __name__ == "__main__":
from doctest import testmod
testmod()
| 667 |
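
Both helpers above are direct rearrangements of the ideal-gas law PV = nRT. A worked check (values are illustrative; one mole at 273.15 K in 22.4 L should sit near atmospheric pressure):

R = 8.314462  # J mol^-1 K^-1

def pressure(moles: float, kelvin: float, volume: float) -> float:
    return moles * kelvin * R / volume  # P = nRT / V

assert abs(pressure(1.0, 273.15, 0.0224) - 101_388) < 10  # Pa
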
'''simple docstring'''
import os
__lowerCamelCase = {'''I''': 1, '''V''': 5, '''X''': 10, '''L''': 50, '''C''': 100, '''D''': 500, '''M''': 1000}
def UpperCAmelCase__ ( UpperCAmelCase__ ) -> int:
A_ = 0
A_ = 0
while index < len(UpperCAmelCase__ ) - 1:
A_ = SYMBOLS[numerals[index]]
A_ = SYMBOLS[numerals[index + 1]]
if current_value < next_value:
total_value -= current_value
else:
total_value += current_value
index += 1
total_value += SYMBOLS[numerals[index]]
return total_value
def UpperCAmelCase__ ( UpperCAmelCase__ ) -> str:
A_ = """"""
A_ = num // 10_00
numerals += m_count * "M"
num %= 10_00
A_ = num // 1_00
if c_count == 9:
numerals += "CM"
c_count -= 9
elif c_count == 4:
numerals += "CD"
c_count -= 4
if c_count >= 5:
numerals += "D"
c_count -= 5
numerals += c_count * "C"
num %= 1_00
A_ = num // 10
if x_count == 9:
numerals += "XC"
x_count -= 9
elif x_count == 4:
numerals += "XL"
x_count -= 4
if x_count >= 5:
numerals += "L"
x_count -= 5
numerals += x_count * "X"
num %= 10
if num == 9:
numerals += "IX"
num -= 9
elif num == 4:
numerals += "IV"
num -= 4
if num >= 5:
numerals += "V"
num -= 5
numerals += num * "I"
return numerals
def UpperCAmelCase__ ( UpperCAmelCase__ = "/p089_roman.txt" ) -> int:
A_ = 0
    with open(os.path.dirname(__file__ ) + roman_numerals_filename ) as filea:
A_ = filea.readlines()
for line in lines:
A_ = line.strip()
A_ = parse_roman_numerals(UpperCAmelCase__ )
A_ = generate_roman_numerals(UpperCAmelCase__ )
savings += len(UpperCAmelCase__ ) - len(UpperCAmelCase__ )
return savings
if __name__ == "__main__":
print(f"""{solution() = }""")
| 667 | 1 |
'''simple docstring'''
from dataclasses import dataclass
from typing import Dict, Optional, Union
import torch
import torch.nn.functional as F
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .attention import BasicTransformerBlock
from .attention_processor import AttentionProcessor, AttnProcessor
from .embeddings import TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin
@dataclass
class A__ ( _snake_case ):
lowercase = 42
class A__ ( _snake_case , _snake_case ):
@register_to_config
def __init__( self , UpperCamelCase__ = 32 , UpperCamelCase__ = 64 , UpperCamelCase__ = 20 , UpperCamelCase__ = 768 , UpperCamelCase__=77 , UpperCamelCase__=4 , UpperCamelCase__ = 0.0 , UpperCamelCase__ = "silu" , UpperCamelCase__ = None , UpperCamelCase__ = None , UpperCamelCase__ = "linear" , UpperCamelCase__ = "prd" , UpperCamelCase__ = None , UpperCamelCase__ = None , UpperCamelCase__ = None , ) -> Optional[Any]:
'''simple docstring'''
super().__init__()
A_ = num_attention_heads
A_ = attention_head_dim
A_ = num_attention_heads * attention_head_dim
A_ = additional_embeddings
A_ = time_embed_dim or inner_dim
A_ = embedding_proj_dim or embedding_dim
A_ = clip_embed_dim or embedding_dim
A_ = Timesteps(UpperCamelCase__ , UpperCamelCase__ , 0 )
A_ = TimestepEmbedding(UpperCamelCase__ , UpperCamelCase__ , out_dim=UpperCamelCase__ , act_fn=UpperCamelCase__ )
A_ = nn.Linear(UpperCamelCase__ , UpperCamelCase__ )
if embedding_proj_norm_type is None:
A_ = None
elif embedding_proj_norm_type == "layer":
A_ = nn.LayerNorm(UpperCamelCase__ )
else:
raise ValueError(f'''unsupported embedding_proj_norm_type: {embedding_proj_norm_type}''' )
A_ = nn.Linear(UpperCamelCase__ , UpperCamelCase__ )
if encoder_hid_proj_type is None:
A_ = None
elif encoder_hid_proj_type == "linear":
A_ = nn.Linear(UpperCamelCase__ , UpperCamelCase__ )
else:
raise ValueError(f'''unsupported encoder_hid_proj_type: {encoder_hid_proj_type}''' )
A_ = nn.Parameter(torch.zeros(1 , num_embeddings + additional_embeddings , UpperCamelCase__ ) )
if added_emb_type == "prd":
A_ = nn.Parameter(torch.zeros(1 , 1 , UpperCamelCase__ ) )
elif added_emb_type is None:
A_ = None
else:
raise ValueError(
f'''`added_emb_type`: {added_emb_type} is not supported. Make sure to choose one of `\'prd\'` or `None`.''' )
A_ = nn.ModuleList(
[
BasicTransformerBlock(
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , dropout=UpperCamelCase__ , activation_fn="""gelu""" , attention_bias=UpperCamelCase__ , )
for d in range(UpperCamelCase__ )
] )
if norm_in_type == "layer":
A_ = nn.LayerNorm(UpperCamelCase__ )
elif norm_in_type is None:
A_ = None
else:
raise ValueError(f'''Unsupported norm_in_type: {norm_in_type}.''' )
A_ = nn.LayerNorm(UpperCamelCase__ )
A_ = nn.Linear(UpperCamelCase__ , UpperCamelCase__ )
A_ = torch.full(
[num_embeddings + additional_embeddings, num_embeddings + additional_embeddings] , -10000.0 )
causal_attention_mask.triu_(1 )
A_ = causal_attention_mask[None, ...]
self.register_buffer("""causal_attention_mask""" , UpperCamelCase__ , persistent=UpperCamelCase__ )
A_ = nn.Parameter(torch.zeros(1 , UpperCamelCase__ ) )
A_ = nn.Parameter(torch.zeros(1 , UpperCamelCase__ ) )
@property
# Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors
def snake_case_ ( self ) -> Dict[str, AttentionProcessor]:
'''simple docstring'''
A_ = {}
def fn_recursive_add_processors(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
if hasattr(UpperCamelCase__ , """set_processor""" ):
A_ = module.processor
for sub_name, child in module.named_children():
fn_recursive_add_processors(f'''{name}.{sub_name}''' , UpperCamelCase__ , UpperCamelCase__ )
return processors
for name, module in self.named_children():
fn_recursive_add_processors(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
return processors
def snake_case_ ( self , UpperCamelCase__ ) -> Union[str, Any]:
'''simple docstring'''
A_ = len(self.attn_processors.keys() )
if isinstance(UpperCamelCase__ , UpperCamelCase__ ) and len(UpperCamelCase__ ) != count:
raise ValueError(
f'''A dict of processors was passed, but the number of processors {len(UpperCamelCase__ )} does not match the'''
f''' number of attention layers: {count}. Please make sure to pass {count} processor classes.''' )
def fn_recursive_attn_processor(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
if hasattr(UpperCamelCase__ , """set_processor""" ):
if not isinstance(UpperCamelCase__ , UpperCamelCase__ ):
module.set_processor(UpperCamelCase__ )
else:
module.set_processor(processor.pop(f'''{name}.processor''' ) )
for sub_name, child in module.named_children():
fn_recursive_attn_processor(f'''{name}.{sub_name}''' , UpperCamelCase__ , UpperCamelCase__ )
for name, module in self.named_children():
fn_recursive_attn_processor(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
def snake_case_ ( self ) -> int:
'''simple docstring'''
self.set_attn_processor(AttnProcessor() )
def snake_case_ ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = None , UpperCamelCase__ = None , UpperCamelCase__ = True , ) -> List[Any]:
'''simple docstring'''
A_ = hidden_states.shape[0]
A_ = timestep
if not torch.is_tensor(UpperCamelCase__ ):
A_ = torch.tensor([timesteps] , dtype=torch.long , device=hidden_states.device )
elif torch.is_tensor(UpperCamelCase__ ) and len(timesteps.shape ) == 0:
A_ = timesteps[None].to(hidden_states.device )
# broadcast to batch dimension in a way that's compatible with ONNX/Core ML
A_ = timesteps * torch.ones(UpperCamelCase__ , dtype=timesteps.dtype , device=timesteps.device )
A_ = self.time_proj(UpperCamelCase__ )
# timesteps does not contain any weights and will always return f32 tensors
# but time_embedding might be fp16, so we need to cast here.
A_ = timesteps_projected.to(dtype=self.dtype )
A_ = self.time_embedding(UpperCamelCase__ )
if self.embedding_proj_norm is not None:
A_ = self.embedding_proj_norm(UpperCamelCase__ )
A_ = self.embedding_proj(UpperCamelCase__ )
if self.encoder_hidden_states_proj is not None and encoder_hidden_states is not None:
A_ = self.encoder_hidden_states_proj(UpperCamelCase__ )
elif self.encoder_hidden_states_proj is not None and encoder_hidden_states is None:
raise ValueError("""`encoder_hidden_states_proj` requires `encoder_hidden_states` to be set""" )
A_ = self.proj_in(UpperCamelCase__ )
A_ = self.positional_embedding.to(hidden_states.dtype )
A_ = []
A_ = 0
if encoder_hidden_states is not None:
additional_embeds.append(UpperCamelCase__ )
additional_embeddings_len += encoder_hidden_states.shape[1]
if len(proj_embeddings.shape ) == 2:
A_ = proj_embeddings[:, None, :]
if len(hidden_states.shape ) == 2:
A_ = hidden_states[:, None, :]
A_ = additional_embeds + [
proj_embeddings,
time_embeddings[:, None, :],
hidden_states,
]
if self.prd_embedding is not None:
A_ = self.prd_embedding.to(hidden_states.dtype ).expand(UpperCamelCase__ , -1 , -1 )
additional_embeds.append(UpperCamelCase__ )
A_ = torch.cat(
UpperCamelCase__ , dim=1 , )
        # Allow positional_embedding to not include the `additional_embeddings` and instead pad it with zeros for these additional tokens
        # Allow positional_embedding to not include the `additional_embeddings` and instead pad it with zeros for these additional tokens
A_ = additional_embeddings_len + proj_embeddings.shape[1] + 1
if positional_embeddings.shape[1] < hidden_states.shape[1]:
A_ = F.pad(
UpperCamelCase__ , (
0,
0,
additional_embeddings_len,
self.prd_embedding.shape[1] if self.prd_embedding is not None else 0,
) , value=0.0 , )
A_ = hidden_states + positional_embeddings
if attention_mask is not None:
A_ = (1 - attention_mask.to(hidden_states.dtype )) * -10000.0
A_ = F.pad(UpperCamelCase__ , (0, self.additional_embeddings) , value=0.0 )
A_ = (attention_mask[:, None, :] + self.causal_attention_mask).to(hidden_states.dtype )
A_ = attention_mask.repeat_interleave(self.config.num_attention_heads , dim=0 )
if self.norm_in is not None:
A_ = self.norm_in(UpperCamelCase__ )
for block in self.transformer_blocks:
A_ = block(UpperCamelCase__ , attention_mask=UpperCamelCase__ )
A_ = self.norm_out(UpperCamelCase__ )
if self.prd_embedding is not None:
A_ = hidden_states[:, -1]
else:
A_ = hidden_states[:, additional_embeddings_len:]
A_ = self.proj_to_clip_embeddings(UpperCamelCase__ )
if not return_dict:
return (predicted_image_embedding,)
return PriorTransformerOutput(predicted_image_embedding=UpperCamelCase__ )
def snake_case_ ( self , UpperCamelCase__ ) -> Union[str, Any]:
'''simple docstring'''
A_ = (prior_latents * self.clip_std) + self.clip_mean
return prior_latents
| 667 |
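
The constructor above builds an additive causal mask: −10000 strictly above the diagonal, 0 elsewhere, so that after softmax no position can attend to later tokens. The construction in isolation, assuming PyTorch is installed (the sequence length is illustrative):

import torch

seq_len = 4
mask = torch.full((seq_len, seq_len), -10000.0)
mask.triu_(1)           # keep -10000 strictly above the diagonal, zero the rest
mask = mask[None, ...]  # add a batch dimension
# mask[0] ==
# [[    0., -10000., -10000., -10000.],
#  [    0.,      0., -10000., -10000.],
#  [    0.,      0.,      0., -10000.],
#  [    0.,      0.,      0.,      0.]]
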
'''simple docstring'''
import warnings
from diffusers import StableDiffusionImgaImgPipeline # noqa F401
warnings.warn(
'''The `image_to_image.py` script is outdated. Please use directly `from diffusers import'''
''' StableDiffusionImg2ImgPipeline` instead.'''
)
| 667 | 1 |
'''simple docstring'''
import math
def UpperCAmelCase__ ( UpperCAmelCase__, UpperCAmelCase__ ) -> int:
A_ = len(UpperCAmelCase__ )
A_ = int(math.floor(math.sqrt(UpperCAmelCase__ ) ) )
A_ = 0
while arr[min(UpperCAmelCase__, UpperCAmelCase__ ) - 1] < x:
A_ = step
step += int(math.floor(math.sqrt(UpperCAmelCase__ ) ) )
if prev >= n:
return -1
while arr[prev] < x:
A_ = prev + 1
if prev == min(UpperCAmelCase__, UpperCAmelCase__ ):
return -1
if arr[prev] == x:
return prev
return -1
if __name__ == "__main__":
__lowerCamelCase = input('''Enter numbers separated by a comma:\n''').strip()
__lowerCamelCase = [int(item) for item in user_input.split(''',''')]
__lowerCamelCase = int(input('''Enter the number to be searched:\n'''))
__lowerCamelCase = jump_search(arr, x)
if res == -1:
print('''Number not found!''')
else:
print(f"""Number {x} is at index {res}""")
| 667 |
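
Jump search probes a sorted array in √n-sized blocks and then scans linearly inside the block that could contain x. A de-obfuscated sketch with the same return convention (-1 when absent):

import math

def jump_search(arr: list, x) -> int:
    n = len(arr)
    step = int(math.floor(math.sqrt(n)))
    prev = 0
    # jump block by block while the block's last element is still < x
    while arr[min(step, n) - 1] < x:
        prev = step
        step += int(math.floor(math.sqrt(n)))
        if prev >= n:
            return -1
    # linear scan inside the candidate block
    while arr[prev] < x:
        prev += 1
        if prev == min(step, n):
            return -1
    return prev if arr[prev] == x else -1

assert jump_search([0, 1, 1, 2, 3, 5, 8, 13, 21], 8) == 6
assert jump_search([0, 1, 1, 2, 3, 5, 8, 13, 21], 4) == -1
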
'''simple docstring'''
import importlib.util
import os
import platform
from argparse import ArgumentParser
import huggingface_hub
from .. import __version__ as version
from ..utils import (
is_accelerate_available,
is_flax_available,
is_safetensors_available,
is_tf_available,
is_torch_available,
)
from . import BaseTransformersCLICommand
def UpperCAmelCase__ ( UpperCAmelCase__ ) -> Tuple:
return EnvironmentCommand()
def UpperCAmelCase__ ( UpperCAmelCase__ ) -> int:
return EnvironmentCommand(args.accelerate_config_file )
class A__ ( _snake_case ):
@staticmethod
def snake_case_ ( UpperCamelCase__ ) -> Union[str, Any]:
'''simple docstring'''
A_ = parser.add_parser("""env""" )
download_parser.set_defaults(func=UpperCamelCase__ )
download_parser.add_argument(
"""--accelerate-config_file""" , default=UpperCamelCase__ , help="""The accelerate config file to use for the default values in the launching script.""" , )
download_parser.set_defaults(func=UpperCamelCase__ )
def __init__( self , UpperCamelCase__ , *UpperCamelCase__ ) -> None:
'''simple docstring'''
A_ = accelerate_config_file
def snake_case_ ( self ) -> List[str]:
'''simple docstring'''
A_ = """not installed"""
if is_safetensors_available():
import safetensors
A_ = safetensors.__version__
elif importlib.util.find_spec("""safetensors""" ) is not None:
import safetensors
A_ = f'''{safetensors.__version__} but is ignored because of PyTorch version too old.'''
A_ = """not installed"""
A_ = A_ = """not found"""
if is_accelerate_available():
import accelerate
from accelerate.commands.config import default_config_file, load_config_from_file
A_ = accelerate.__version__
# Get the default from the config file.
            if self._accelerate_config_file is not None or os.path.isfile(default_config_file ):
A_ = load_config_from_file(self._accelerate_config_file ).to_dict()
A_ = (
"""\n""".join([f'''\t- {prop}: {val}''' for prop, val in accelerate_config.items()] )
if isinstance(UpperCamelCase__ , UpperCamelCase__ )
else f'''\t{accelerate_config}'''
)
A_ = """not installed"""
A_ = """NA"""
if is_torch_available():
import torch
A_ = torch.__version__
A_ = torch.cuda.is_available()
A_ = """not installed"""
A_ = """NA"""
if is_tf_available():
import tensorflow as tf
A_ = tf.__version__
try:
# deprecated in v2.1
A_ = tf.test.is_gpu_available()
except AttributeError:
# returns list of devices, convert to bool
A_ = bool(tf.config.list_physical_devices("""GPU""" ) )
A_ = """not installed"""
A_ = """not installed"""
A_ = """not installed"""
A_ = """NA"""
if is_flax_available():
import flax
import jax
import jaxlib
A_ = flax.__version__
A_ = jax.__version__
A_ = jaxlib.__version__
A_ = jax.lib.xla_bridge.get_backend().platform
A_ = {
"""`transformers` version""": version,
"""Platform""": platform.platform(),
"""Python version""": platform.python_version(),
"""Huggingface_hub version""": huggingface_hub.__version__,
"""Safetensors version""": f'''{safetensors_version}''',
"""Accelerate version""": f'''{accelerate_version}''',
"""Accelerate config""": f'''{accelerate_config_str}''',
"""PyTorch version (GPU?)""": f'''{pt_version} ({pt_cuda_available})''',
"""Tensorflow version (GPU?)""": f'''{tf_version} ({tf_cuda_available})''',
"""Flax version (CPU?/GPU?/TPU?)""": f'''{flax_version} ({jax_backend})''',
"""Jax version""": f'''{jax_version}''',
"""JaxLib version""": f'''{jaxlib_version}''',
"""Using GPU in script?""": """<fill in>""",
"""Using distributed or parallel set-up in script?""": """<fill in>""",
}
print("""\nCopy-and-paste the text below in your GitHub issue and FILL OUT the two last points.\n""" )
print(self.format_dict(UpperCamelCase__ ) )
return info
@staticmethod
def snake_case_ ( UpperCamelCase__ ) -> List[str]:
'''simple docstring'''
return "\n".join([f'''- {prop}: {val}''' for prop, val in d.items()] ) + "\n"
| 667 | 1 |
'''simple docstring'''
import operator as op
def UpperCAmelCase__ ( UpperCAmelCase__ ) -> Any:
A_ = []
A_ = lambda UpperCAmelCase__, UpperCAmelCase__ : int(x / y ) # noqa: E731 integer division operation
A_ = {
"""^""": op.pow,
"""*""": op.mul,
"""/""": div,
"""+""": op.add,
"""-""": op.sub,
} # operators & their respective operation
# print table header
print("""Symbol""".center(8 ), """Action""".center(12 ), """Stack""", sep=""" | """ )
print("""-""" * (30 + len(UpperCAmelCase__ )) )
for x in post_fix:
if x.isdigit(): # if x in digit
stack.append(UpperCAmelCase__ ) # append x to stack
# output in tabular format
print(x.rjust(8 ), ("""push(""" + x + """)""").ljust(12 ), """,""".join(UpperCAmelCase__ ), sep=""" | """ )
else:
A_ = stack.pop() # pop stack
# output in tabular format
print("""""".rjust(8 ), ("""pop(""" + b + """)""").ljust(12 ), """,""".join(UpperCAmelCase__ ), sep=""" | """ )
A_ = stack.pop() # pop stack
# output in tabular format
print("""""".rjust(8 ), ("""pop(""" + a + """)""").ljust(12 ), """,""".join(UpperCAmelCase__ ), sep=""" | """ )
stack.append(
str(opr[x](int(UpperCAmelCase__ ), int(UpperCAmelCase__ ) ) ) ) # evaluate the 2 values popped from stack & push result to stack
# output in tabular format
print(
x.rjust(8 ), ("""push(""" + a + x + b + """)""").ljust(12 ), """,""".join(UpperCAmelCase__ ), sep=""" | """, )
return int(stack[0] )
if __name__ == "__main__":
__lowerCamelCase = input('''\n\nEnter a Postfix Equation (space separated) = ''').split(''' ''')
print('''\n\tResult = ''', solve(Postfix))
| 667 |
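
The evaluator above scans a postfix expression left to right, pushing digits and applying each operator to the top two stack entries (second pop is the left operand). The same loop without the tabular printing (names are illustrative):

import operator as op

def eval_postfix(tokens: list) -> int:
    ops = {"^": op.pow, "*": op.mul, "/": lambda x, y: int(x / y), "+": op.add, "-": op.sub}
    stack = []
    for tok in tokens:
        if tok.isdigit():
            stack.append(int(tok))
        else:
            b, a = stack.pop(), stack.pop()  # b was pushed last
            stack.append(int(ops[tok](a, b)))
    return stack[0]

assert eval_postfix("5 6 9 * +".split()) == 59
assert eval_postfix("7 2 /".split()) == 3
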
'''simple docstring'''
import unittest
import numpy as np
import torch
from torch import nn
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModelWithProjection,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import KandinskyVaaPriorPipeline, PriorTransformer, UnCLIPScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import enable_full_determinism, skip_mps
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class A__ ( _snake_case , unittest.TestCase ):
lowercase = KandinskyVaaPriorPipeline
lowercase = ["prompt"]
lowercase = ["prompt", "negative_prompt"]
lowercase = [
"num_images_per_prompt",
"generator",
"num_inference_steps",
"latents",
"negative_prompt",
"guidance_scale",
"output_type",
"return_dict",
]
lowercase = False
@property
def snake_case_ ( self ) -> Any:
'''simple docstring'''
return 32
@property
def snake_case_ ( self ) -> Optional[int]:
'''simple docstring'''
return 32
@property
def snake_case_ ( self ) -> Optional[Any]:
'''simple docstring'''
return self.time_input_dim
@property
def snake_case_ ( self ) -> str:
'''simple docstring'''
return self.time_input_dim * 4
@property
def snake_case_ ( self ) -> int:
'''simple docstring'''
return 100
@property
def snake_case_ ( self ) -> Optional[Any]:
'''simple docstring'''
A_ = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
return tokenizer
@property
def snake_case_ ( self ) -> Union[str, Any]:
'''simple docstring'''
torch.manual_seed(0 )
A_ = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
return CLIPTextModelWithProjection(UpperCamelCase__ )
@property
def snake_case_ ( self ) -> Tuple:
'''simple docstring'''
torch.manual_seed(0 )
A_ = {
"""num_attention_heads""": 2,
"""attention_head_dim""": 12,
"""embedding_dim""": self.text_embedder_hidden_size,
"""num_layers""": 1,
}
A_ = PriorTransformer(**UpperCamelCase__ )
# clip_std and clip_mean is initialized to be 0 so PriorTransformer.post_process_latents will always return 0 - set clip_std to be 1 so it won't return 0
A_ = nn.Parameter(torch.ones(model.clip_std.shape ) )
return model
@property
def snake_case_ ( self ) -> str:
'''simple docstring'''
torch.manual_seed(0 )
A_ = CLIPVisionConfig(
hidden_size=self.text_embedder_hidden_size , image_size=224 , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_channels=3 , num_hidden_layers=5 , patch_size=14 , )
A_ = CLIPVisionModelWithProjection(UpperCamelCase__ )
return model
@property
def snake_case_ ( self ) -> str:
'''simple docstring'''
A_ = CLIPImageProcessor(
crop_size=224 , do_center_crop=UpperCamelCase__ , do_normalize=UpperCamelCase__ , do_resize=UpperCamelCase__ , image_mean=[0.48145466, 0.4578275, 0.40821073] , image_std=[0.26862954, 0.26130258, 0.27577711] , resample=3 , size=224 , )
return image_processor
def snake_case_ ( self ) -> Union[str, Any]:
'''simple docstring'''
A_ = self.dummy_prior
A_ = self.dummy_image_encoder
A_ = self.dummy_text_encoder
A_ = self.dummy_tokenizer
A_ = self.dummy_image_processor
A_ = UnCLIPScheduler(
variance_type="""fixed_small_log""" , prediction_type="""sample""" , num_train_timesteps=1000 , clip_sample=UpperCamelCase__ , clip_sample_range=10.0 , )
A_ = {
"""prior""": prior,
"""image_encoder""": image_encoder,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""scheduler""": scheduler,
"""image_processor""": image_processor,
}
return components
def snake_case_ ( self , UpperCamelCase__ , UpperCamelCase__=0 ) -> Optional[int]:
'''simple docstring'''
if str(UpperCamelCase__ ).startswith("""mps""" ):
A_ = torch.manual_seed(UpperCamelCase__ )
else:
A_ = torch.Generator(device=UpperCamelCase__ ).manual_seed(UpperCamelCase__ )
A_ = {
"""prompt""": """horse""",
"""generator""": generator,
"""guidance_scale""": 4.0,
"""num_inference_steps""": 2,
"""output_type""": """np""",
}
return inputs
def snake_case_ ( self ) -> List[str]:
'''simple docstring'''
A_ = """cpu"""
A_ = self.get_dummy_components()
A_ = self.pipeline_class(**UpperCamelCase__ )
A_ = pipe.to(UpperCamelCase__ )
pipe.set_progress_bar_config(disable=UpperCamelCase__ )
A_ = pipe(**self.get_dummy_inputs(UpperCamelCase__ ) )
A_ = output.image_embeds
A_ = pipe(
**self.get_dummy_inputs(UpperCamelCase__ ) , return_dict=UpperCamelCase__ , )[0]
A_ = image[0, -10:]
A_ = image_from_tuple[0, -10:]
assert image.shape == (1, 32)
A_ = np.array(
[-0.0532, 1.7120, 0.3656, -1.0852, -0.8946, -1.1756, 0.4348, 0.2482, 0.5146, -0.1156] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
@skip_mps
def snake_case_ ( self ) -> int:
'''simple docstring'''
A_ = torch_device == """cpu"""
A_ = True
A_ = False
self._test_inference_batch_single_identical(
test_max_difference=UpperCamelCase__ , relax_max_difference=UpperCamelCase__ , test_mean_pixel_difference=UpperCamelCase__ , )
@skip_mps
def snake_case_ ( self ) -> Union[str, Any]:
'''simple docstring'''
A_ = torch_device == """cpu"""
A_ = False
self._test_attention_slicing_forward_pass(
test_max_difference=UpperCamelCase__ , test_mean_pixel_difference=UpperCamelCase__ , )
| 667 | 1 |
'''simple docstring'''
import unittest
import torch
from diffusers import VQModel
from diffusers.utils import floats_tensor, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
enable_full_determinism()
class A__ ( _snake_case , _snake_case , unittest.TestCase ):
lowercase = VQModel
lowercase = "sample"
@property
def snake_case_ ( self , UpperCamelCase__=(32, 32) ) -> Optional[int]:
'''simple docstring'''
A_ = 4
A_ = 3
A_ = floats_tensor((batch_size, num_channels) + sizes ).to(UpperCamelCase__ )
return {"sample": image}
@property
def snake_case_ ( self ) -> List[Any]:
'''simple docstring'''
return (3, 32, 32)
@property
def snake_case_ ( self ) -> Tuple:
'''simple docstring'''
return (3, 32, 32)
def snake_case_ ( self ) -> str:
'''simple docstring'''
A_ = {
"""block_out_channels""": [32, 64],
"""in_channels""": 3,
"""out_channels""": 3,
"""down_block_types""": ["""DownEncoderBlock2D""", """DownEncoderBlock2D"""],
"""up_block_types""": ["""UpDecoderBlock2D""", """UpDecoderBlock2D"""],
"""latent_channels""": 3,
}
A_ = self.dummy_input
return init_dict, inputs_dict
def snake_case_ ( self ) -> Optional[Any]:
'''simple docstring'''
pass
def snake_case_ ( self ) -> List[Any]:
'''simple docstring'''
pass
def snake_case_ ( self ) -> Optional[int]:
'''simple docstring'''
A_ , A_ = VQModel.from_pretrained("""fusing/vqgan-dummy""" , output_loading_info=UpperCamelCase__ )
self.assertIsNotNone(UpperCamelCase__ )
self.assertEqual(len(loading_info["""missing_keys"""] ) , 0 )
model.to(UpperCamelCase__ )
A_ = model(**self.dummy_input )
assert image is not None, "Make sure output is not None"
def snake_case_ ( self ) -> List[str]:
'''simple docstring'''
A_ = VQModel.from_pretrained("""fusing/vqgan-dummy""" )
model.to(UpperCamelCase__ ).eval()
torch.manual_seed(0 )
if torch.cuda.is_available():
torch.cuda.manual_seed_all(0 )
A_ = torch.randn(1 , model.config.in_channels , model.config.sample_size , model.config.sample_size )
A_ = image.to(UpperCamelCase__ )
with torch.no_grad():
A_ = model(UpperCamelCase__ ).sample
A_ = output[0, -1, -3:, -3:].flatten().cpu()
# fmt: off
A_ = torch.tensor([-0.0153, -0.4044, -0.1880, -0.5161, -0.2418, -0.4072, -0.1612, -0.0633, -0.0143] )
# fmt: on
self.assertTrue(torch.allclose(UpperCamelCase__ , UpperCamelCase__ , atol=1e-3 ) )
| 667 |
'''simple docstring'''
import tempfile
import torch
from diffusers import IPNDMScheduler
from .test_schedulers import SchedulerCommonTest
class A__ ( _snake_case ):
lowercase = (IPNDMScheduler,)
lowercase = (("num_inference_steps", 50),)
def snake_case_ ( self , **UpperCamelCase__ ) -> Optional[Any]:
'''simple docstring'''
A_ = {"""num_train_timesteps""": 1000}
config.update(**UpperCamelCase__ )
return config
def snake_case_ ( self , UpperCamelCase__=0 , **UpperCamelCase__ ) -> str:
'''simple docstring'''
A_ = dict(self.forward_default_kwargs )
A_ = kwargs.pop("""num_inference_steps""" , UpperCamelCase__ )
A_ = self.dummy_sample
A_ = 0.1 * sample
A_ = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
A_ = self.get_scheduler_config(**UpperCamelCase__ )
A_ = scheduler_class(**UpperCamelCase__ )
scheduler.set_timesteps(UpperCamelCase__ )
# copy over dummy past residuals
A_ = dummy_past_residuals[:]
if time_step is None:
A_ = scheduler.timesteps[len(scheduler.timesteps ) // 2]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(UpperCamelCase__ )
A_ = scheduler_class.from_pretrained(UpperCamelCase__ )
new_scheduler.set_timesteps(UpperCamelCase__ )
# copy over dummy past residuals
A_ = dummy_past_residuals[:]
A_ = scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__ ).prev_sample
A_ = new_scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
A_ = scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__ ).prev_sample
A_ = new_scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
def snake_case_ ( self ) -> Optional[int]:
'''simple docstring'''
pass
def snake_case_ ( self , UpperCamelCase__=0 , **UpperCamelCase__ ) -> str:
'''simple docstring'''
A_ = dict(self.forward_default_kwargs )
A_ = kwargs.pop("""num_inference_steps""" , UpperCamelCase__ )
A_ = self.dummy_sample
A_ = 0.1 * sample
A_ = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
A_ = self.get_scheduler_config()
A_ = scheduler_class(**UpperCamelCase__ )
scheduler.set_timesteps(UpperCamelCase__ )
# copy over dummy past residuals (must be after setting timesteps)
A_ = dummy_past_residuals[:]
if time_step is None:
A_ = scheduler.timesteps[len(scheduler.timesteps ) // 2]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(UpperCamelCase__ )
A_ = scheduler_class.from_pretrained(UpperCamelCase__ )
# copy over dummy past residuals
new_scheduler.set_timesteps(UpperCamelCase__ )
# copy over dummy past residual (must be after setting timesteps)
A_ = dummy_past_residuals[:]
A_ = scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__ ).prev_sample
A_ = new_scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
A_ = scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__ ).prev_sample
A_ = new_scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
def snake_case_ ( self , **UpperCamelCase__ ) -> Tuple:
'''simple docstring'''
A_ = self.scheduler_classes[0]
A_ = self.get_scheduler_config(**UpperCamelCase__ )
A_ = scheduler_class(**UpperCamelCase__ )
A_ = 10
A_ = self.dummy_model()
A_ = self.dummy_sample_deter
scheduler.set_timesteps(UpperCamelCase__ )
for i, t in enumerate(scheduler.timesteps ):
A_ = model(UpperCamelCase__ , UpperCamelCase__ )
A_ = scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ).prev_sample
for i, t in enumerate(scheduler.timesteps ):
A_ = model(UpperCamelCase__ , UpperCamelCase__ )
A_ = scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ).prev_sample
return sample
def snake_case_ ( self ) -> Union[str, Any]:
'''simple docstring'''
A_ = dict(self.forward_default_kwargs )
A_ = kwargs.pop("""num_inference_steps""" , UpperCamelCase__ )
for scheduler_class in self.scheduler_classes:
A_ = self.get_scheduler_config()
A_ = scheduler_class(**UpperCamelCase__ )
A_ = self.dummy_sample
A_ = 0.1 * sample
if num_inference_steps is not None and hasattr(UpperCamelCase__ , """set_timesteps""" ):
scheduler.set_timesteps(UpperCamelCase__ )
elif num_inference_steps is not None and not hasattr(UpperCamelCase__ , """set_timesteps""" ):
A_ = num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
A_ = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
A_ = dummy_past_residuals[:]
A_ = scheduler.timesteps[5]
A_ = scheduler.timesteps[6]
A_ = scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__ ).prev_sample
A_ = scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__ ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
A_ = scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__ ).prev_sample
A_ = scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__ ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
def snake_case_ ( self ) -> Any:
'''simple docstring'''
for timesteps in [100, 1000]:
self.check_over_configs(num_train_timesteps=UpperCamelCase__ , time_step=UpperCamelCase__ )
def snake_case_ ( self ) -> Any:
'''simple docstring'''
for t, num_inference_steps in zip([1, 5, 10] , [10, 50, 100] ):
self.check_over_forward(num_inference_steps=UpperCamelCase__ , time_step=UpperCamelCase__ )
def snake_case_ ( self ) -> Dict:
'''simple docstring'''
A_ = self.full_loop()
A_ = torch.mean(torch.abs(UpperCamelCase__ ) )
assert abs(result_mean.item() - 2540529 ) < 10
| 667 | 1 |
'''simple docstring'''
import json
import os
from functools import lru_cache
from typing import Dict, List, Optional, Tuple, Union
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...tokenization_utils_base import BatchEncoding, EncodedInput
from ...utils import PaddingStrategy, logging
__lowerCamelCase = logging.get_logger(__name__)
__lowerCamelCase = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt'''}
# See all LED models at https://huggingface.co/models?filter=LED
__lowerCamelCase = {
'''vocab_file''': {
'''allenai/led-base-16384''': '''https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json''',
},
'''merges_file''': {
'''allenai/led-base-16384''': '''https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt''',
},
'''tokenizer_file''': {
'''allenai/led-base-16384''': '''https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json''',
},
}
__lowerCamelCase = {
'''allenai/led-base-16384''': 1_6384,
}
@lru_cache()
# Copied from transformers.models.bart.tokenization_bart.bytes_to_unicode
def UpperCAmelCase__ ( ) -> Union[str, Any]:
A_ = (
list(range(ord("""!""" ), ord("""~""" ) + 1 ) ) + list(range(ord("""¡""" ), ord("""¬""" ) + 1 ) ) + list(range(ord("""®""" ), ord("""ÿ""" ) + 1 ) )
)
A_ = bs[:]
A_ = 0
for b in range(2**8 ):
if b not in bs:
bs.append(UpperCAmelCase__ )
cs.append(2**8 + n )
n += 1
A_ = [chr(UpperCAmelCase__ ) for n in cs]
return dict(zip(UpperCAmelCase__, UpperCAmelCase__ ) )
def UpperCAmelCase__ ( UpperCAmelCase__ ) -> Optional[int]:
A_ = set()
A_ = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
A_ = char
return pairs
class A__ ( _snake_case ):
lowercase = VOCAB_FILES_NAMES
lowercase = PRETRAINED_VOCAB_FILES_MAP
lowercase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase = ["input_ids", "attention_mask"]
def __init__( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__="replace" , UpperCamelCase__="<s>" , UpperCamelCase__="</s>" , UpperCamelCase__="</s>" , UpperCamelCase__="<s>" , UpperCamelCase__="<unk>" , UpperCamelCase__="<pad>" , UpperCamelCase__="<mask>" , UpperCamelCase__=False , **UpperCamelCase__ , ) -> Optional[int]:
'''simple docstring'''
A_ = AddedToken(UpperCamelCase__ , lstrip=UpperCamelCase__ , rstrip=UpperCamelCase__ ) if isinstance(UpperCamelCase__ , UpperCamelCase__ ) else bos_token
A_ = AddedToken(UpperCamelCase__ , lstrip=UpperCamelCase__ , rstrip=UpperCamelCase__ ) if isinstance(UpperCamelCase__ , UpperCamelCase__ ) else eos_token
A_ = AddedToken(UpperCamelCase__ , lstrip=UpperCamelCase__ , rstrip=UpperCamelCase__ ) if isinstance(UpperCamelCase__ , UpperCamelCase__ ) else sep_token
A_ = AddedToken(UpperCamelCase__ , lstrip=UpperCamelCase__ , rstrip=UpperCamelCase__ ) if isinstance(UpperCamelCase__ , UpperCamelCase__ ) else cls_token
A_ = AddedToken(UpperCamelCase__ , lstrip=UpperCamelCase__ , rstrip=UpperCamelCase__ ) if isinstance(UpperCamelCase__ , UpperCamelCase__ ) else unk_token
A_ = AddedToken(UpperCamelCase__ , lstrip=UpperCamelCase__ , rstrip=UpperCamelCase__ ) if isinstance(UpperCamelCase__ , UpperCamelCase__ ) else pad_token
# Mask token behave like a normal word, i.e. include the space before it
A_ = AddedToken(UpperCamelCase__ , lstrip=UpperCamelCase__ , rstrip=UpperCamelCase__ ) if isinstance(UpperCamelCase__ , UpperCamelCase__ ) else mask_token
super().__init__(
errors=UpperCamelCase__ , bos_token=UpperCamelCase__ , eos_token=UpperCamelCase__ , unk_token=UpperCamelCase__ , sep_token=UpperCamelCase__ , cls_token=UpperCamelCase__ , pad_token=UpperCamelCase__ , mask_token=UpperCamelCase__ , add_prefix_space=UpperCamelCase__ , **UpperCamelCase__ , )
with open(UpperCamelCase__ , encoding="""utf-8""" ) as vocab_handle:
A_ = json.load(UpperCamelCase__ )
A_ = {v: k for k, v in self.encoder.items()}
A_ = errors # how to handle errors in decoding
A_ = bytes_to_unicode()
A_ = {v: k for k, v in self.byte_encoder.items()}
with open(UpperCamelCase__ , encoding="""utf-8""" ) as merges_handle:
A_ = merges_handle.read().split("""\n""" )[1:-1]
A_ = [tuple(merge.split() ) for merge in bpe_merges]
A_ = dict(zip(UpperCamelCase__ , range(len(UpperCamelCase__ ) ) ) )
A_ = {}
A_ = add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
A_ = re.compile(R"""'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+""" )
@property
# Copied from transformers.models.bart.tokenization_bart.BartTokenizer.vocab_size
def snake_case_ ( self ) -> Any:
'''simple docstring'''
return len(self.encoder )
def snake_case_ ( self ) -> List[str]:
'''simple docstring'''
return dict(self.encoder , **self.added_tokens_encoder )
def snake_case_ ( self , UpperCamelCase__ ) -> str:
'''simple docstring'''
if token in self.cache:
return self.cache[token]
A_ = tuple(UpperCamelCase__ )
A_ = get_pairs(UpperCamelCase__ )
if not pairs:
return token
while True:
A_ = min(UpperCamelCase__ , key=lambda UpperCamelCase__ : self.bpe_ranks.get(UpperCamelCase__ , float("""inf""" ) ) )
if bigram not in self.bpe_ranks:
break
A_ , A_ = bigram
A_ = []
A_ = 0
while i < len(UpperCamelCase__ ):
try:
A_ = word.index(UpperCamelCase__ , UpperCamelCase__ )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
A_ = j
if word[i] == first and i < len(UpperCamelCase__ ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
A_ = tuple(UpperCamelCase__ )
A_ = new_word
if len(UpperCamelCase__ ) == 1:
break
else:
A_ = get_pairs(UpperCamelCase__ )
A_ = """ """.join(UpperCamelCase__ )
A_ = word
return word
def snake_case_ ( self , UpperCamelCase__ ) -> Any:
'''simple docstring'''
A_ = []
for token in re.findall(self.pat , UpperCamelCase__ ):
A_ = """""".join(
self.byte_encoder[b] for b in token.encode("""utf-8""" ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(UpperCamelCase__ ).split(""" """ ) )
return bpe_tokens
def snake_case_ ( self , UpperCamelCase__ ) -> Optional[int]:
'''simple docstring'''
return self.encoder.get(UpperCamelCase__ , self.encoder.get(self.unk_token ) )
def snake_case_ ( self , UpperCamelCase__ ) -> Union[str, Any]:
'''simple docstring'''
return self.decoder.get(UpperCamelCase__ )
def snake_case_ ( self , UpperCamelCase__ ) -> Any:
'''simple docstring'''
A_ = """""".join(UpperCamelCase__ )
A_ = bytearray([self.byte_decoder[c] for c in text] ).decode("""utf-8""" , errors=self.errors )
return text
def snake_case_ ( self , UpperCamelCase__ , UpperCamelCase__ = None ) -> Tuple[str]:
'''simple docstring'''
if not os.path.isdir(UpperCamelCase__ ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
A_ = os.path.join(
UpperCamelCase__ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
A_ = os.path.join(
UpperCamelCase__ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""merges_file"""] )
with open(UpperCamelCase__ , """w""" , encoding="""utf-8""" ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=UpperCamelCase__ , ensure_ascii=UpperCamelCase__ ) + """\n""" )
A_ = 0
with open(UpperCamelCase__ , """w""" , encoding="""utf-8""" ) as writer:
writer.write("""#version: 0.2\n""" )
for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda UpperCamelCase__ : kv[1] ):
if index != token_index:
logger.warning(
f'''Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'''
""" Please check that the tokenizer is not corrupted!""" )
A_ = token_index
writer.write(""" """.join(UpperCamelCase__ ) + """\n""" )
index += 1
return vocab_file, merge_file
def snake_case_ ( self , UpperCamelCase__ , UpperCamelCase__ = None ) -> List[int]:
'''simple docstring'''
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
A_ = [self.cls_token_id]
A_ = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def snake_case_ ( self , UpperCamelCase__ , UpperCamelCase__ = None , UpperCamelCase__ = False ) -> List[int]:
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=UpperCamelCase__ , token_ids_a=UpperCamelCase__ , already_has_special_tokens=UpperCamelCase__ )
if token_ids_a is None:
return [1] + ([0] * len(UpperCamelCase__ )) + [1]
return [1] + ([0] * len(UpperCamelCase__ )) + [1, 1] + ([0] * len(UpperCamelCase__ )) + [1]
def snake_case_ ( self , UpperCamelCase__ , UpperCamelCase__ = None ) -> List[int]:
'''simple docstring'''
A_ = [self.sep_token_id]
A_ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def snake_case_ ( self , UpperCamelCase__ , UpperCamelCase__=False , **UpperCamelCase__ ) -> Optional[int]:
'''simple docstring'''
A_ = kwargs.pop("""add_prefix_space""" , self.add_prefix_space )
if (is_split_into_words or add_prefix_space) and (len(UpperCamelCase__ ) > 0 and not text[0].isspace()):
A_ = """ """ + text
return (text, kwargs)
def snake_case_ ( self , UpperCamelCase__ , UpperCamelCase__ = None , UpperCamelCase__ = PaddingStrategy.DO_NOT_PAD , UpperCamelCase__ = None , UpperCamelCase__ = None , ) -> dict:
'''simple docstring'''
A_ = super()._pad(
encoded_inputs=UpperCamelCase__ , max_length=UpperCamelCase__ , padding_strategy=UpperCamelCase__ , pad_to_multiple_of=UpperCamelCase__ , return_attention_mask=UpperCamelCase__ , )
# Load from model defaults
if return_attention_mask is None:
A_ = """attention_mask""" in self.model_input_names
if return_attention_mask and "global_attention_mask" in encoded_inputs:
A_ = encoded_inputs[self.model_input_names[0]]
# `global_attention_mask` need to have the same length as other (sequential) inputs.
A_ = len(encoded_inputs["""global_attention_mask"""] ) != len(UpperCamelCase__ )
if needs_to_be_padded:
A_ = len(UpperCamelCase__ ) - len(encoded_inputs["""global_attention_mask"""] )
if self.padding_side == "right":
# Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
A_ = (
encoded_inputs["""global_attention_mask"""] + [-1] * difference
)
elif self.padding_side == "left":
A_ = [-1] * difference + encoded_inputs[
"""global_attention_mask"""
]
else:
raise ValueError("""Invalid padding strategy:""" + str(self.padding_side ) )
return encoded_inputs
| 667 |
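
The `bpe` method above repeatedly merges the lowest-ranked adjacent symbol pair until none of the word's pairs appears in the merge table. A toy run of that loop (the merge ranks here are made up, not real LED merges):

def get_pairs(word: tuple) -> set:
    return {(a, b) for a, b in zip(word, word[1:])}

bpe_ranks = {("l", "o"): 0, ("lo", "w"): 1}  # toy merge table
word = tuple("low")
while True:
    candidates = [p for p in get_pairs(word) if p in bpe_ranks]
    if not candidates:
        break
    first, second = min(candidates, key=bpe_ranks.get)
    merged, i = [], 0
    while i < len(word):
        if i < len(word) - 1 and word[i] == first and word[i + 1] == second:
            merged.append(first + second)
            i += 2
        else:
            merged.append(word[i])
            i += 1
    word = tuple(merged)

assert word == ("low",)  # ("l","o","w") -> ("lo","w") -> ("low",)
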
'''simple docstring'''
import argparse
import re
from flax.traverse_util import flatten_dict, unflatten_dict
from tax import checkpoints
from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration
from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model
from transformers.utils import logging
logging.set_verbosity_info()
# should not include what is already done by the `from_pt` argument
__lowerCamelCase = {
'''/attention/''': '''/0/SelfAttention/''',
'''/self_attention/''': '''/0/SelfAttention/''',
'''/encoder_decoder_attention/''': '''/1/EncDecAttention/''',
'''value''': '''v''',
'''query''': '''q''',
'''key''': '''k''',
'''out''': '''o''',
'''pre_self_attention_layer_norm''': '''0/layer_norm''',
'''pre_cross_attention_layer_norm''': '''1/layer_norm''',
'''pre_attention_layer_norm''': '''0/layer_norm''', # previously 1, but seems wrong
'''token_embedder''': '''shared''',
'''encoder_norm''': '''final_layer_norm''',
'''decoder_norm''': '''final_layer_norm''',
'''relpos_bias/rel_embedding''': '''block/0/layer/0/SelfAttention/relative_attention_bias/weight''',
'''router/router_weights/w/''': '''router/classifier/''',
'''roer/roer_weights/w/''': '''router/classifier/''',
'''logits_dense''': '''lm_head''',
}
def UpperCAmelCase__ ( UpperCAmelCase__ ) -> Tuple:
# 1. in HF T5, we have block.{x}.layer.{y}. which corresponds to layer.{x} in
# the original model
A_ = list(s_dict.keys() )
for key in keys:
A_ = r""".*/layers_(\d+)"""
A_ = key
if re.match(UpperCAmelCase__, UpperCAmelCase__ ):
A_ = re.sub(r"""layers_(\d+)""", r"""block/\1/layer""", UpperCAmelCase__ )
A_ = r"""(encoder|decoder)\/"""
if re.match(UpperCAmelCase__, UpperCAmelCase__ ):
A_ = re.match(UpperCAmelCase__, UpperCAmelCase__ ).groups()
if groups[0] == "encoder":
A_ = re.sub(r"""/mlp/""", r"""/1/mlp/""", UpperCAmelCase__ )
A_ = re.sub(r"""/pre_mlp_layer_norm/""", r"""/1/layer_norm/""", UpperCAmelCase__ )
elif groups[0] == "decoder":
A_ = re.sub(r"""/mlp/""", r"""/2/mlp/""", UpperCAmelCase__ )
A_ = re.sub(r"""/pre_mlp_layer_norm/""", r"""/2/layer_norm/""", UpperCAmelCase__ )
# 2. Convert other classic mappings
for old_key, temp_key in MOE_LAYER_NAME_MAPPING.items():
if old_key in new_key:
A_ = new_key.replace(UpperCAmelCase__, UpperCAmelCase__ )
print(F'''{key} -> {new_key}''' )
A_ = s_dict.pop(UpperCAmelCase__ )
if "encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict:
A_ = s_dict[
"""encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"""
].T
if "decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict:
A_ = s_dict[
"""decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"""
].T
# 3. Take extra care of the EXPERTS layer
for key in list(s_dict.keys() ):
if "expert" in key:
A_ = s_dict[key].shape[0]
A_ = s_dict[key]
for idx in range(UpperCAmelCase__ ):
            A_ = expert_weights[idx]
            print(F'''{key} -> {key.replace("expert/", f"experts/expert_{idx}/" )}''' )
s_dict.pop(UpperCAmelCase__ )
return s_dict
__lowerCamelCase = {
'''NUM_ENCODER_LAYERS''': '''num_layers''',
'''NUM_DECODER_LAYERS''': '''num_decoder_layers''',
'''NUM_HEADS''': '''num_heads''',
'''HEAD_DIM''': '''d_kv''',
'''EMBED_DIM''': '''d_model''',
'''MLP_DIM''': '''d_ff''',
'''NUM_SELECTED_EXPERTS''': '''num_selected_experts''',
'''NUM_ENCODER_SPARSE_LAYERS''': '''num_sparse_encoder_layers''',
'''NUM_DECODER_SPARSE_LAYERS''': '''num_sparse_decoder_layers''',
'''dense.MlpBlock.activations''': '''feed_forward_proj''',
}
def UpperCAmelCase__ ( UpperCAmelCase__, UpperCAmelCase__ ) -> List[Any]:
    # Convert a google style config to the hugging face format
import regex as re
with open(UpperCAmelCase__, """r""" ) as f:
A_ = f.read()
A_ = re.findall(r"""(.*) = ([0-9.]*)""", UpperCAmelCase__ )
A_ = {}
for param, value in regex_match:
if param in GIN_TO_CONFIG_MAPPING and value != "":
A_ = float(UpperCAmelCase__ ) if """.""" in value else int(UpperCAmelCase__ )
A_ = re.findall(r"""(.*activations) = \(\'(.*)\',\)""", UpperCAmelCase__ )[0]
A_ = str(activation[1] )
A_ = num_experts
A_ = SwitchTransformersConfig(**UpperCAmelCase__ )
return config
def UpperCAmelCase__ ( UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__=None, UpperCAmelCase__="./", UpperCAmelCase__=8 ) -> List[str]:
# Initialise PyTorch model
print(F'''Loading flax weights from : {flax_checkpoint_path}''' )
A_ = checkpoints.load_tax_checkpoint(UpperCAmelCase__ )
if gin_file is not None:
A_ = convert_gin_to_config(UpperCAmelCase__, UpperCAmelCase__ )
else:
A_ = SwitchTransformersConfig.from_pretrained(UpperCAmelCase__ )
A_ = SwitchTransformersForConditionalGeneration(UpperCAmelCase__ )
A_ = flax_params["""target"""]
A_ = flatten_dict(UpperCAmelCase__, sep="""/""" )
A_ = rename_keys(UpperCAmelCase__ )
A_ = unflatten_dict(UpperCAmelCase__, sep="""/""" )
# Load the flax params in the PT model
load_flax_weights_in_pytorch_model(UpperCAmelCase__, UpperCAmelCase__ )
print(F'''Save PyTorch model to {pytorch_dump_path}''' )
pt_model.save_pretrained(UpperCAmelCase__ )
if __name__ == "__main__":
__lowerCamelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--switch_t5x_checkpoint_path''',
default=None,
type=str,
required=True,
help=(
'''The config json file corresponding to the pre-trained SwitchTransformers model. \nThis specifies the'''
''' model architecture. If not provided, a `gin_file` has to be provided.'''
),
)
parser.add_argument(
'''--gin_file''',
default=None,
type=str,
required=False,
help='''Path to the gin config file. If not provided, a `config_file` has to be passed ''',
)
parser.add_argument(
'''--config_name''', default=None, type=str, required=False, help='''Config name of SwitchTransformers model.'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output pytorch model.'''
)
parser.add_argument('''--num_experts''', default=8, type=int, required=False, help='''Number of experts''')
__lowerCamelCase = parser.parse_args()
convert_flax_checkpoint_to_pytorch(
args.switch_tax_checkpoint_path,
args.config_name,
args.gin_file,
args.pytorch_dump_folder_path,
args.num_experts,
)
| 667 | 1 |
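
The renaming pass above is mostly regex surgery on flattened T5X parameter paths, followed by the literal mapping table. The core substitution on one key, for illustration (the path itself is made up):

import re

key = "encoder/layers_3/attention/query/kernel"
key = re.sub(r"layers_(\d+)", r"block/\1/layer", key)  # layer index -> block/N/layer
key = key.replace("/attention/", "/0/SelfAttention/")  # one entry of the mapping table
key = key.replace("query", "q")
assert key == "encoder/block/3/layer/0/SelfAttention/q/kernel"
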
'''simple docstring'''
import cmath
import math
def UpperCAmelCase__ ( UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__ ) -> complex:
A_ = math.radians(UpperCAmelCase__ )
A_ = math.radians(UpperCAmelCase__ )
# Convert voltage and current to rectangular form
A_ = cmath.rect(UpperCAmelCase__, UpperCAmelCase__ )
A_ = cmath.rect(UpperCAmelCase__, UpperCAmelCase__ )
# Calculate apparent power
return voltage_rect * current_rect
if __name__ == "__main__":
import doctest
doctest.testmod()
| 667 |
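
Apparent power is the product of the voltage and current phasors; converting the polar inputs with `cmath.rect` lets one complex multiply handle both magnitude and phase. Worked values (illustrative):

import cmath
import math

def apparent_power(v: float, i: float, v_deg: float, i_deg: float) -> complex:
    return cmath.rect(v, math.radians(v_deg)) * cmath.rect(i, math.radians(i_deg))

assert abs(apparent_power(100, 5, 0, 0) - 500) < 1e-9    # in-phase: purely real 500 VA
assert abs(apparent_power(100, 5, 30, -30) - 500) < 1e-9  # phases cancel: 30° + (-30°) = 0°
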
'''simple docstring'''
def UpperCAmelCase__ ( UpperCAmelCase__ ) -> int:
    # Count the distinct ways to climb `number_of_steps` stairs taking one or
    # two steps at a time; the counts follow the Fibonacci sequence.
assert (
isinstance(UpperCAmelCase__, UpperCAmelCase__ ) and number_of_steps > 0
), F'''number_of_steps needs to be positive integer, your input {number_of_steps}'''
if number_of_steps == 1:
return 1
A_ , A_ = 1, 1
for _ in range(number_of_steps - 1 ):
A_ , A_ = current + previous, current
return current
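# --- Added worked example (helper name is hypothetical). The loop above is
# the Fibonacci recurrence ways(n) = ways(n - 1) + ways(n - 2); for five
# steps taken one or two at a time there are eight distinct climbs.
def _demo_climbing_stairs() -> None:
    current, previous = 1, 1
    for _ in range(5 - 1):
        current, previous = current + previous, current
    assert current == 8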
if __name__ == "__main__":
import doctest
doctest.testmod()
| 667 | 1 |
'''simple docstring'''
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class A__ ( _snake_case ):
@staticmethod
@abstractmethod
def snake_case_ ( UpperCamelCase__ ) -> List[Any]:
'''simple docstring'''
raise NotImplementedError()
@abstractmethod
def snake_case_ ( self ) -> Optional[Any]:
'''simple docstring'''
raise NotImplementedError()
| 667 |
'''simple docstring'''
def UpperCAmelCase__ ( UpperCAmelCase__ ) -> bool:
return str(UpperCAmelCase__ ) == str(UpperCAmelCase__ )[::-1]
def UpperCAmelCase__ ( UpperCAmelCase__ ) -> int:
return int(UpperCAmelCase__ ) + int(str(UpperCAmelCase__ )[::-1] )
def UpperCAmelCase__ ( UpperCAmelCase__ = 1_00_00 ) -> int:
    # Count how many numbers below the limit are Lychrel candidates: numbers
    # that do not produce a palindrome within fifty reverse-and-add
    # iterations (Project Euler problem 55).
A_ = []
for num in range(1, UpperCAmelCase__ ):
A_ = 0
A_ = num
while iterations < 50:
A_ = sum_reverse(UpperCAmelCase__ )
iterations += 1
if is_palindrome(UpperCAmelCase__ ):
break
else:
lychrel_nums.append(UpperCAmelCase__ )
return len(UpperCAmelCase__ )
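# --- Added worked example (helper name is hypothetical). One reverse-and-add
# step on 47 yields 47 + 74 = 121, a palindrome, so 47 is rejected as a
# Lychrel candidate on the first iteration of the loop above.
def _demo_reverse_and_add() -> None:
    num = 47
    step = num + int(str(num)[::-1])
    assert step == 121 and str(step) == str(step)[::-1]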
if __name__ == "__main__":
print(f"""{solution() = }""")
| 667 | 1 |
'''simple docstring'''
import argparse
import re
from flax.traverse_util import flatten_dict, unflatten_dict
from t5x import checkpoints
from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration
from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model
from transformers.utils import logging
logging.set_verbosity_info()
# should not include what is already done by the `from_pt` argument
__lowerCamelCase = {
'''/attention/''': '''/0/SelfAttention/''',
'''/self_attention/''': '''/0/SelfAttention/''',
'''/encoder_decoder_attention/''': '''/1/EncDecAttention/''',
'''value''': '''v''',
'''query''': '''q''',
'''key''': '''k''',
'''out''': '''o''',
'''pre_self_attention_layer_norm''': '''0/layer_norm''',
'''pre_cross_attention_layer_norm''': '''1/layer_norm''',
'''pre_attention_layer_norm''': '''0/layer_norm''', # previously 1, but seems wrong
'''token_embedder''': '''shared''',
'''encoder_norm''': '''final_layer_norm''',
'''decoder_norm''': '''final_layer_norm''',
'''relpos_bias/rel_embedding''': '''block/0/layer/0/SelfAttention/relative_attention_bias/weight''',
'''router/router_weights/w/''': '''router/classifier/''',
'''roer/roer_weights/w/''': '''router/classifier/''',
'''logits_dense''': '''lm_head''',
}
def UpperCAmelCase__ ( UpperCAmelCase__ ) -> Tuple:
# 1. in HF T5, we have block.{x}.layer.{y}. which corresponds to layer.{x} in
# the original model
A_ = list(s_dict.keys() )
for key in keys:
A_ = r""".*/layers_(\d+)"""
A_ = key
if re.match(UpperCAmelCase__, UpperCAmelCase__ ):
A_ = re.sub(r"""layers_(\d+)""", r"""block/\1/layer""", UpperCAmelCase__ )
A_ = r"""(encoder|decoder)\/"""
if re.match(UpperCAmelCase__, UpperCAmelCase__ ):
A_ = re.match(UpperCAmelCase__, UpperCAmelCase__ ).groups()
if groups[0] == "encoder":
A_ = re.sub(r"""/mlp/""", r"""/1/mlp/""", UpperCAmelCase__ )
A_ = re.sub(r"""/pre_mlp_layer_norm/""", r"""/1/layer_norm/""", UpperCAmelCase__ )
elif groups[0] == "decoder":
A_ = re.sub(r"""/mlp/""", r"""/2/mlp/""", UpperCAmelCase__ )
A_ = re.sub(r"""/pre_mlp_layer_norm/""", r"""/2/layer_norm/""", UpperCAmelCase__ )
# 2. Convert other classic mappings
for old_key, temp_key in MOE_LAYER_NAME_MAPPING.items():
if old_key in new_key:
A_ = new_key.replace(UpperCAmelCase__, UpperCAmelCase__ )
print(F'''{key} -> {new_key}''' )
A_ = s_dict.pop(UpperCAmelCase__ )
if "encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict:
A_ = s_dict[
"""encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"""
].T
if "decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict:
A_ = s_dict[
"""decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"""
].T
# 3. Take extra care of the EXPERTS layer
for key in list(s_dict.keys() ):
if "expert" in key:
A_ = s_dict[key].shape[0]
A_ = s_dict[key]
for idx in range(UpperCAmelCase__ ):
A_ = expert_weihts[idx]
            print(F'''{key} -> {key.replace("expert/", F"experts/expert_{idx}/" )}''' )
s_dict.pop(UpperCAmelCase__ )
return s_dict
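# --- Added illustrative sketch (hypothetical T5X key). A key such as
# "encoder/layers_0/attention/query/kernel" is first rewritten by the layer
# regex above and then mapped through MOE_LAYER_NAME_MAPPING.
def _demo_rename_layer_key() -> None:
    key = "encoder/layers_0/attention/query/kernel"
    key = re.sub(r"layers_(\d+)", r"block/\1/layer", key)
    assert key == "encoder/block/0/layer/attention/query/kernel"
    key = key.replace("/attention/", "/0/SelfAttention/").replace("query", "q")
    assert key == "encoder/block/0/layer/0/SelfAttention/q/kernel"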
__lowerCamelCase = {
'''NUM_ENCODER_LAYERS''': '''num_layers''',
'''NUM_DECODER_LAYERS''': '''num_decoder_layers''',
'''NUM_HEADS''': '''num_heads''',
'''HEAD_DIM''': '''d_kv''',
'''EMBED_DIM''': '''d_model''',
'''MLP_DIM''': '''d_ff''',
'''NUM_SELECTED_EXPERTS''': '''num_selected_experts''',
'''NUM_ENCODER_SPARSE_LAYERS''': '''num_sparse_encoder_layers''',
'''NUM_DECODER_SPARSE_LAYERS''': '''num_sparse_decoder_layers''',
'''dense.MlpBlock.activations''': '''feed_forward_proj''',
}
def UpperCAmelCase__ ( UpperCAmelCase__, UpperCAmelCase__ ) -> List[Any]:
    # Convert a Google-style gin config to the Hugging Face format
import regex as re
with open(UpperCAmelCase__, """r""" ) as f:
A_ = f.read()
A_ = re.findall(r"""(.*) = ([0-9.]*)""", UpperCAmelCase__ )
A_ = {}
for param, value in regex_match:
if param in GIN_TO_CONFIG_MAPPING and value != "":
A_ = float(UpperCAmelCase__ ) if """.""" in value else int(UpperCAmelCase__ )
A_ = re.findall(r"""(.*activations) = \(\'(.*)\',\)""", UpperCAmelCase__ )[0]
A_ = str(activation[1] )
A_ = num_experts
A_ = SwitchTransformersConfig(**UpperCAmelCase__ )
return config
def UpperCAmelCase__ ( UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__=None, UpperCAmelCase__="./", UpperCAmelCase__=8 ) -> List[str]:
# Initialise PyTorch model
print(F'''Loading flax weights from : {flax_checkpoint_path}''' )
    A_ = checkpoints.load_t5x_checkpoint(UpperCAmelCase__ )
if gin_file is not None:
A_ = convert_gin_to_config(UpperCAmelCase__, UpperCAmelCase__ )
else:
A_ = SwitchTransformersConfig.from_pretrained(UpperCAmelCase__ )
A_ = SwitchTransformersForConditionalGeneration(UpperCAmelCase__ )
A_ = flax_params["""target"""]
A_ = flatten_dict(UpperCAmelCase__, sep="""/""" )
A_ = rename_keys(UpperCAmelCase__ )
A_ = unflatten_dict(UpperCAmelCase__, sep="""/""" )
# Load the flax params in the PT model
load_flax_weights_in_pytorch_model(UpperCAmelCase__, UpperCAmelCase__ )
print(F'''Save PyTorch model to {pytorch_dump_path}''' )
pt_model.save_pretrained(UpperCAmelCase__ )
if __name__ == "__main__":
__lowerCamelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--switch_t5x_checkpoint_path''',
default=None,
type=str,
required=True,
help=(
            '''Path to the T5X checkpoint of the pre-trained SwitchTransformers model. \nIf a `config_name` is not'''
            ''' provided, a `gin_file` has to be provided.'''
),
)
parser.add_argument(
'''--gin_file''',
default=None,
type=str,
required=False,
        help='''Path to the gin config file. If not provided, a `config_name` has to be passed.''',
)
parser.add_argument(
'''--config_name''', default=None, type=str, required=False, help='''Config name of SwitchTransformers model.'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output pytorch model.'''
)
parser.add_argument('''--num_experts''', default=8, type=int, required=False, help='''Number of experts''')
__lowerCamelCase = parser.parse_args()
convert_flax_checkpoint_to_pytorch(
        args.switch_t5x_checkpoint_path,
args.config_name,
args.gin_file,
args.pytorch_dump_folder_path,
args.num_experts,
)
| 667 |
'''simple docstring'''
import argparse
import json
from typing import List
from ltp import LTP
from transformers.models.bert.tokenization_bert import BertTokenizer
def UpperCAmelCase__ ( UpperCAmelCase__ ) -> Optional[int]:
# This defines a "chinese character" as anything in the CJK Unicode block:
# https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
#
# Note that the CJK Unicode block is NOT all Japanese and Korean characters,
# despite its name. The modern Korean Hangul alphabet is a different block,
# as is Japanese Hiragana and Katakana. Those alphabets are used to write
# space-separated words, so they are not treated specially and handled
    # like all of the other languages.
if (
(cp >= 0X4E_00 and cp <= 0X9F_FF)
or (cp >= 0X34_00 and cp <= 0X4D_BF) #
or (cp >= 0X2_00_00 and cp <= 0X2_A6_DF) #
or (cp >= 0X2_A7_00 and cp <= 0X2_B7_3F) #
or (cp >= 0X2_B7_40 and cp <= 0X2_B8_1F) #
or (cp >= 0X2_B8_20 and cp <= 0X2_CE_AF) #
or (cp >= 0XF9_00 and cp <= 0XFA_FF)
or (cp >= 0X2_F8_00 and cp <= 0X2_FA_1F) #
): #
return True
return False
def UpperCAmelCase__ ( UpperCAmelCase__ ) -> List[Any]:
# word like '180' or '身高' or '神'
for char in word:
A_ = ord(UpperCAmelCase__ )
if not _is_chinese_char(UpperCAmelCase__ ):
return 0
return 1
def UpperCAmelCase__ ( UpperCAmelCase__ ) -> int:
A_ = set()
for token in tokens:
A_ = len(UpperCAmelCase__ ) > 1 and is_chinese(UpperCAmelCase__ )
if chinese_word:
word_set.add(UpperCAmelCase__ )
A_ = list(UpperCAmelCase__ )
return word_list
def UpperCAmelCase__ ( UpperCAmelCase__, UpperCAmelCase__ ) -> Dict:
if not chinese_word_set:
return bert_tokens
A_ = max([len(UpperCAmelCase__ ) for w in chinese_word_set] )
A_ = bert_tokens
A_ , A_ = 0, len(UpperCAmelCase__ )
while start < end:
A_ = True
if is_chinese(bert_word[start] ):
A_ = min(end - start, UpperCAmelCase__ )
for i in range(UpperCAmelCase__, 1, -1 ):
A_ = """""".join(bert_word[start : start + i] )
if whole_word in chinese_word_set:
for j in range(start + 1, start + i ):
A_ = """##""" + bert_word[j]
A_ = start + i
A_ = False
break
if single_word:
start += 1
return bert_word
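# --- Added worked example (hypothetical tokens). If LTP segments 身高 as one
# word, the trailing BERT character receives the "##" prefix so whole-word
# masking treats both characters as a single unit, mirroring the loop above.
def _demo_add_sub_symbol() -> None:
    bert_word = ["身", "高", "180"]
    if "".join(bert_word[0:2]) in {"身高"}:
        bert_word[1] = "##" + bert_word[1]
    assert bert_word == ["身", "##高", "180"]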
def UpperCAmelCase__ ( UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__ ) -> Dict:
A_ = []
for i in range(0, len(UpperCAmelCase__ ), 1_00 ):
A_ = ltp_tokenizer.pipeline(lines[i : i + 1_00], tasks=["""cws"""] ).cws
A_ = [get_chinese_word(UpperCAmelCase__ ) for r in res]
ltp_res.extend(UpperCAmelCase__ )
assert len(UpperCAmelCase__ ) == len(UpperCAmelCase__ )
A_ = []
for i in range(0, len(UpperCAmelCase__ ), 1_00 ):
A_ = bert_tokenizer(lines[i : i + 1_00], add_special_tokens=UpperCAmelCase__, truncation=UpperCAmelCase__, max_length=5_12 )
bert_res.extend(res["""input_ids"""] )
assert len(UpperCAmelCase__ ) == len(UpperCAmelCase__ )
A_ = []
for input_ids, chinese_word in zip(UpperCAmelCase__, UpperCAmelCase__ ):
A_ = []
for id in input_ids:
A_ = bert_tokenizer._convert_id_to_token(UpperCAmelCase__ )
input_tokens.append(UpperCAmelCase__ )
A_ = add_sub_symbol(UpperCAmelCase__, UpperCAmelCase__ )
A_ = []
        # We only save the positions of Chinese subwords that start with ##,
        # which means they are part of a whole word.
for i, token in enumerate(UpperCAmelCase__ ):
if token[:2] == "##":
A_ = token[2:]
# save chinese tokens' pos
if len(UpperCAmelCase__ ) == 1 and _is_chinese_char(ord(UpperCAmelCase__ ) ):
ref_id.append(UpperCAmelCase__ )
ref_ids.append(UpperCAmelCase__ )
assert len(UpperCAmelCase__ ) == len(UpperCAmelCase__ )
return ref_ids
def UpperCAmelCase__ ( UpperCAmelCase__ ) -> Optional[Any]:
# For Chinese (Ro)Bert, the best result is from : RoBERTa-wwm-ext (https://github.com/ymcui/Chinese-BERT-wwm)
# If we want to fine-tune these model, we have to use same tokenizer : LTP (https://github.com/HIT-SCIR/ltp)
with open(args.file_name, """r""", encoding="""utf-8""" ) as f:
A_ = f.readlines()
A_ = [line.strip() for line in data if len(UpperCAmelCase__ ) > 0 and not line.isspace()] # avoid delimiter like '\u2029'
    A_ = LTP(args.ltp )  # faster on a GPU device
A_ = BertTokenizer.from_pretrained(args.bert )
A_ = prepare_ref(UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__ )
with open(args.save_path, """w""", encoding="""utf-8""" ) as f:
A_ = [json.dumps(UpperCAmelCase__ ) + """\n""" for ref in ref_ids]
f.writelines(UpperCAmelCase__ )
if __name__ == "__main__":
__lowerCamelCase = argparse.ArgumentParser(description='''prepare_chinese_ref''')
parser.add_argument(
'''--file_name''',
required=False,
type=str,
default='''./resources/chinese-demo.txt''',
help='''file need process, same as training data in lm''',
)
parser.add_argument(
'''--ltp''',
required=False,
type=str,
default='''./resources/ltp''',
help='''resources for LTP tokenizer, usually a path''',
)
parser.add_argument(
'''--bert''',
required=False,
type=str,
default='''./resources/robert''',
help='''resources for Bert tokenizer''',
)
parser.add_argument(
'''--save_path''',
required=False,
type=str,
default='''./resources/ref.txt''',
help='''path to save res''',
)
__lowerCamelCase = parser.parse_args()
main(args)
| 667 | 1 |
'''simple docstring'''
import requests
__lowerCamelCase = '''''' # <-- Put your OpenWeatherMap appid here!
__lowerCamelCase = '''https://api.openweathermap.org/data/2.5/'''
def UpperCAmelCase__ ( UpperCAmelCase__ = "Chicago", UpperCAmelCase__ = APPID ) -> dict:
return requests.get(URL_BASE + """weather""", params=locals() ).json()
def UpperCAmelCase__ ( UpperCAmelCase__ = "Kolkata, India", UpperCAmelCase__ = APPID ) -> dict:
return requests.get(URL_BASE + """forecast""", params=locals() ).json()
def UpperCAmelCase__ ( UpperCAmelCase__ = 55.68, UpperCAmelCase__ = 12.57, UpperCAmelCase__ = APPID ) -> dict:
return requests.get(URL_BASE + """onecall""", params=locals() ).json()
if __name__ == "__main__":
from pprint import pprint
while True:
__lowerCamelCase = input('''Enter a location:''').strip()
if location:
pprint(current_weather(location))
else:
break
| 667 |
'''simple docstring'''
from __future__ import annotations
import math
def UpperCAmelCase__ ( UpperCAmelCase__ ) -> bool:
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
    # All primes greater than 3 are of the form 6k +/- 1
for i in range(5, int(math.sqrt(UpperCAmelCase__ ) + 1 ), 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
__lowerCamelCase = [num for num in range(3, 10_0001, 2) if not is_prime(num)]
def UpperCAmelCase__ ( UpperCAmelCase__ ) -> list[int]:
if not isinstance(UpperCAmelCase__, UpperCAmelCase__ ):
raise ValueError("""n must be an integer""" )
if n <= 0:
raise ValueError("""n must be >= 0""" )
A_ = []
for num in range(len(UpperCAmelCase__ ) ):
A_ = 0
while 2 * i * i <= odd_composites[num]:
A_ = odd_composites[num] - 2 * i * i
if is_prime(UpperCAmelCase__ ):
break
i += 1
else:
list_nums.append(odd_composites[num] )
if len(UpperCAmelCase__ ) == n:
return list_nums
return []
def UpperCAmelCase__ ( ) -> int:
return compute_nums(1 )[0]
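# --- Added worked example. Goldbach's other conjecture writes an odd
# composite as a prime plus twice a square; e.g. 33 - 2 * 1 ** 2 == 31,
# which is prime, so the inner loop above clears 33 and moves on.
def _demo_goldbach_decomposition() -> None:
    odd_composite, i = 33, 1
    assert odd_composite - 2 * i * i == 31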
if __name__ == "__main__":
print(f"""{solution() = }""")
| 667 | 1 |
'''simple docstring'''
def UpperCAmelCase__ ( UpperCAmelCase__ ) -> int:
if divisor % 5 == 0 or divisor % 2 == 0:
return 0
A_ = 1
A_ = 1
while repunit:
A_ = (10 * repunit + 1) % divisor
repunit_index += 1
return repunit_index
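# --- Added worked example (hypothetical names). Running the same modular
# loop with divisor 7 exits at index 6, matching R(6) = 111111 = 7 * 15873.
def _demo_least_divisible_repunit() -> None:
    divisor, repunit, index = 7, 1, 1
    while repunit:
        repunit = (10 * repunit + 1) % divisor
        index += 1
    assert index == 6 and 111_111 % 7 == 0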
def UpperCAmelCase__ ( UpperCAmelCase__ = 1_00_00_00 ) -> int:
A_ = limit - 1
if divisor % 2 == 0:
divisor += 1
while least_divisible_repunit(UpperCAmelCase__ ) <= limit:
divisor += 2
return divisor
if __name__ == "__main__":
print(f"""{solution() = }""")
| 667 |
'''simple docstring'''
def UpperCAmelCase__ ( UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__ = 0, UpperCAmelCase__ = 0 ) -> int:
A_ = right or len(UpperCAmelCase__ ) - 1
if left > right:
return -1
elif list_data[left] == key:
return left
elif list_data[right] == key:
return right
else:
return search(UpperCAmelCase__, UpperCAmelCase__, left + 1, right - 1 )
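# --- Added illustrative sketch (hypothetical name). The same two-ended
# recursion, spelled out: each call checks both edges of the window and
# shrinks it by one element on each side.
def _demo_two_ended_search(data, key, left, right) -> int:
    if left > right:
        return -1
    if data[left] == key:
        return left
    if data[right] == key:
        return right
    return _demo_two_ended_search(data, key, left + 1, right - 1)

assert _demo_two_ended_search([1, 5, 3, 7], 3, 0, 3) == 2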
if __name__ == "__main__":
import doctest
doctest.testmod()
| 667 | 1 |
'''simple docstring'''
import math
__lowerCamelCase = 10
__lowerCamelCase = 7
__lowerCamelCase = BALLS_PER_COLOUR * NUM_COLOURS
def UpperCAmelCase__ ( UpperCAmelCase__ = 20 ) -> str:
A_ = math.comb(UpperCAmelCase__, UpperCAmelCase__ )
A_ = math.comb(NUM_BALLS - BALLS_PER_COLOUR, UpperCAmelCase__ )
A_ = NUM_COLOURS * (1 - missing_colour / total)
return F'''{result:.9f}'''
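# --- Added worked check. The formula above is linearity of expectation: each
# colour is missing with probability C(60, 20) / C(70, 20), so the expected
# number of distinct colours is 7 * (1 - that probability) ~= 6.818741802.
def _demo_expected_colours() -> None:
    missing = math.comb(60, 20) / math.comb(70, 20)
    assert abs(NUM_COLOURS * (1 - missing) - 6.818741802) < 1e-8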
if __name__ == "__main__":
print(solution(20))
| 667 |
'''simple docstring'''
import os
import time
import pytest
from datasets.utils.filelock import FileLock, Timeout
def UpperCAmelCase__ ( UpperCAmelCase__ ) -> int:
A_ = FileLock(str(tmpdir / """foo.lock""" ) )
A_ = FileLock(str(tmpdir / """foo.lock""" ) )
A_ = 0.01
with locka.acquire():
with pytest.raises(UpperCAmelCase__ ):
A_ = time.time()
locka.acquire(UpperCAmelCase__ )
assert time.time() - _start > timeout
def UpperCAmelCase__ ( UpperCAmelCase__ ) -> Dict:
A_ = """a""" * 10_00 + """.lock"""
A_ = FileLock(str(tmpdir / filename ) )
assert locka._lock_file.endswith(""".lock""" )
assert not locka._lock_file.endswith(UpperCAmelCase__ )
assert len(os.path.basename(locka._lock_file ) ) <= 2_55
A_ = FileLock(tmpdir / filename )
with locka.acquire():
with pytest.raises(UpperCAmelCase__ ):
locka.acquire(0 )
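# --- Added usage sketch mirroring the first test above; it assumes the
# vendored `FileLock` keeps the standard `filelock` API. While one lock is
# held, a second `acquire` with a tiny timeout raises `Timeout`.
def _demo_filelock_contention(tmpdir) -> None:
    holder = FileLock(str(tmpdir / "demo.lock"))
    contender = FileLock(str(tmpdir / "demo.lock"))
    with holder.acquire():
        with pytest.raises(Timeout):
            contender.acquire(timeout=0.01)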
| 667 | 1 |
'''simple docstring'''
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch
import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin, SchedulerOutput
@dataclass
class A__ ( _snake_case ):
lowercase = 42
lowercase = 42
class A__ ( _snake_case , _snake_case ):
lowercase = 1
@register_to_config
def __init__( self , UpperCamelCase__ = 2000 , UpperCamelCase__ = 0.15 , UpperCamelCase__ = 0.01 , UpperCamelCase__ = 1348.0 , UpperCamelCase__ = 1e-5 , UpperCamelCase__ = 1 , ) -> List[Any]:
'''simple docstring'''
# standard deviation of the initial noise distribution
A_ = sigma_max
# setable values
A_ = None
self.set_sigmas(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
def snake_case_ ( self , UpperCamelCase__ , UpperCamelCase__ = None ) -> torch.FloatTensor:
'''simple docstring'''
return sample
def snake_case_ ( self , UpperCamelCase__ , UpperCamelCase__ = None , UpperCamelCase__ = None ) -> Union[str, Any]:
'''simple docstring'''
A_ = sampling_eps if sampling_eps is not None else self.config.sampling_eps
A_ = torch.linspace(1 , UpperCamelCase__ , UpperCamelCase__ , device=UpperCamelCase__ )
def snake_case_ ( self , UpperCamelCase__ , UpperCamelCase__ = None , UpperCamelCase__ = None , UpperCamelCase__ = None ) -> Optional[int]:
'''simple docstring'''
A_ = sigma_min if sigma_min is not None else self.config.sigma_min
A_ = sigma_max if sigma_max is not None else self.config.sigma_max
A_ = sampling_eps if sampling_eps is not None else self.config.sampling_eps
if self.timesteps is None:
self.set_timesteps(UpperCamelCase__ , UpperCamelCase__ )
A_ = sigma_min * (sigma_max / sigma_min) ** (self.timesteps / sampling_eps)
A_ = torch.exp(torch.linspace(math.log(UpperCamelCase__ ) , math.log(UpperCamelCase__ ) , UpperCamelCase__ ) )
A_ = torch.tensor([sigma_min * (sigma_max / sigma_min) ** t for t in self.timesteps] )
def snake_case_ ( self , UpperCamelCase__ , UpperCamelCase__ ) -> int:
'''simple docstring'''
return torch.where(
timesteps == 0 , torch.zeros_like(t.to(timesteps.device ) ) , self.discrete_sigmas[timesteps - 1].to(timesteps.device ) , )
def snake_case_ ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = None , UpperCamelCase__ = True , ) -> Union[SdeVeOutput, Tuple]:
'''simple docstring'''
if self.timesteps is None:
raise ValueError(
"""`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler""" )
A_ = timestep * torch.ones(
sample.shape[0] , device=sample.device ) # torch.repeat_interleave(timestep, sample.shape[0])
A_ = (timestep * (len(self.timesteps ) - 1)).long()
# mps requires indices to be in the same device, so we use cpu as is the default with cuda
A_ = timesteps.to(self.discrete_sigmas.device )
A_ = self.discrete_sigmas[timesteps].to(sample.device )
A_ = self.get_adjacent_sigma(UpperCamelCase__ , UpperCamelCase__ ).to(sample.device )
A_ = torch.zeros_like(UpperCamelCase__ )
A_ = (sigma**2 - adjacent_sigma**2) ** 0.5
# equation 6 in the paper: the model_output modeled by the network is grad_x log pt(x)
# also equation 47 shows the analog from SDE models to ancestral sampling methods
A_ = diffusion.flatten()
while len(diffusion.shape ) < len(sample.shape ):
A_ = diffusion.unsqueeze(-1 )
A_ = drift - diffusion**2 * model_output
# equation 6: sample noise for the diffusion term of
A_ = randn_tensor(
sample.shape , layout=sample.layout , generator=UpperCamelCase__ , device=sample.device , dtype=sample.dtype )
A_ = sample - drift # subtract because `dt` is a small negative timestep
# TODO is the variable diffusion the correct scaling term for the noise?
A_ = prev_sample_mean + diffusion * noise # add impact of diffusion field g
if not return_dict:
return (prev_sample, prev_sample_mean)
return SdeVeOutput(prev_sample=UpperCamelCase__ , prev_sample_mean=UpperCamelCase__ )
def snake_case_ ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = None , UpperCamelCase__ = True , ) -> Union[SchedulerOutput, Tuple]:
'''simple docstring'''
if self.timesteps is None:
raise ValueError(
"""`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler""" )
# For small batch sizes, the paper "suggest replacing norm(z) with sqrt(d), where d is the dim. of z"
# sample noise for correction
A_ = randn_tensor(sample.shape , layout=sample.layout , generator=UpperCamelCase__ ).to(sample.device )
# compute step size from the model_output, the noise, and the snr
A_ = torch.norm(model_output.reshape(model_output.shape[0] , -1 ) , dim=-1 ).mean()
A_ = torch.norm(noise.reshape(noise.shape[0] , -1 ) , dim=-1 ).mean()
A_ = (self.config.snr * noise_norm / grad_norm) ** 2 * 2
A_ = step_size * torch.ones(sample.shape[0] ).to(sample.device )
# self.repeat_scalar(step_size, sample.shape[0])
# compute corrected sample: model_output term and noise term
A_ = step_size.flatten()
while len(step_size.shape ) < len(sample.shape ):
A_ = step_size.unsqueeze(-1 )
A_ = sample + step_size * model_output
A_ = prev_sample_mean + ((step_size * 2) ** 0.5) * noise
if not return_dict:
return (prev_sample,)
return SchedulerOutput(prev_sample=UpperCamelCase__ )
def snake_case_ ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , ) -> torch.FloatTensor:
'''simple docstring'''
# Make sure sigmas and timesteps have the same device and dtype as original_samples
A_ = timesteps.to(original_samples.device )
A_ = self.discrete_sigmas.to(original_samples.device )[timesteps]
A_ = (
noise * sigmas[:, None, None, None]
if noise is not None
else torch.randn_like(UpperCamelCase__ ) * sigmas[:, None, None, None]
)
A_ = noise + original_samples
return noisy_samples
def __len__( self ) -> int:
'''simple docstring'''
return self.config.num_train_timesteps
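# --- Added illustrative sketch (miniature numbers). The last sigma schedule
# above is geometric: sigma(t) = sigma_min * (sigma_max / sigma_min) ** t
# runs from sigma_max at t = 1 down toward sigma_min as t approaches 0.
def _demo_sigma_schedule() -> None:
    sigma_min, sigma_max = 0.01, 100.0
    t = torch.linspace(1.0, 1e-5, 5)
    sigmas = sigma_min * (sigma_max / sigma_min) ** t
    assert torch.isclose(sigmas[0], torch.tensor(sigma_max))
    assert sigmas[-1] < sigmas[0]  # the noise level shrinks along the schedule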
| 667 |
'''simple docstring'''
from dataclasses import dataclass
from typing import Optional
import numpy as np
import torch
import torch.nn as nn
from ..utils import BaseOutput, is_torch_version, randn_tensor
from .attention_processor import SpatialNorm
from .unet_ad_blocks import UNetMidBlockaD, get_down_block, get_up_block
@dataclass
class A__ ( _snake_case ):
lowercase = 42
class A__ ( nn.Module ):
def __init__( self , UpperCamelCase__=3 , UpperCamelCase__=3 , UpperCamelCase__=("DownEncoderBlock2D",) , UpperCamelCase__=(64,) , UpperCamelCase__=2 , UpperCamelCase__=32 , UpperCamelCase__="silu" , UpperCamelCase__=True , ) -> Union[str, Any]:
'''simple docstring'''
super().__init__()
A_ = layers_per_block
A_ = torch.nn.Convad(
UpperCamelCase__ , block_out_channels[0] , kernel_size=3 , stride=1 , padding=1 , )
A_ = None
A_ = nn.ModuleList([] )
# down
A_ = block_out_channels[0]
for i, down_block_type in enumerate(UpperCamelCase__ ):
A_ = output_channel
A_ = block_out_channels[i]
A_ = i == len(UpperCamelCase__ ) - 1
A_ = get_down_block(
UpperCamelCase__ , num_layers=self.layers_per_block , in_channels=UpperCamelCase__ , out_channels=UpperCamelCase__ , add_downsample=not is_final_block , resnet_eps=1e-6 , downsample_padding=0 , resnet_act_fn=UpperCamelCase__ , resnet_groups=UpperCamelCase__ , attention_head_dim=UpperCamelCase__ , temb_channels=UpperCamelCase__ , )
self.down_blocks.append(UpperCamelCase__ )
# mid
A_ = UNetMidBlockaD(
in_channels=block_out_channels[-1] , resnet_eps=1e-6 , resnet_act_fn=UpperCamelCase__ , output_scale_factor=1 , resnet_time_scale_shift="""default""" , attention_head_dim=block_out_channels[-1] , resnet_groups=UpperCamelCase__ , temb_channels=UpperCamelCase__ , )
# out
A_ = nn.GroupNorm(num_channels=block_out_channels[-1] , num_groups=UpperCamelCase__ , eps=1e-6 )
A_ = nn.SiLU()
A_ = 2 * out_channels if double_z else out_channels
A_ = nn.Convad(block_out_channels[-1] , UpperCamelCase__ , 3 , padding=1 )
A_ = False
def snake_case_ ( self , UpperCamelCase__ ) -> str:
'''simple docstring'''
A_ = x
A_ = self.conv_in(UpperCamelCase__ )
if self.training and self.gradient_checkpointing:
def create_custom_forward(UpperCamelCase__ ):
def custom_forward(*UpperCamelCase__ ):
return module(*UpperCamelCase__ )
return custom_forward
# down
if is_torch_version(""">=""" , """1.11.0""" ):
for down_block in self.down_blocks:
A_ = torch.utils.checkpoint.checkpoint(
create_custom_forward(UpperCamelCase__ ) , UpperCamelCase__ , use_reentrant=UpperCamelCase__ )
# middle
A_ = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) , UpperCamelCase__ , use_reentrant=UpperCamelCase__ )
else:
for down_block in self.down_blocks:
A_ = torch.utils.checkpoint.checkpoint(create_custom_forward(UpperCamelCase__ ) , UpperCamelCase__ )
# middle
A_ = torch.utils.checkpoint.checkpoint(create_custom_forward(self.mid_block ) , UpperCamelCase__ )
else:
# down
for down_block in self.down_blocks:
A_ = down_block(UpperCamelCase__ )
# middle
A_ = self.mid_block(UpperCamelCase__ )
# post-process
A_ = self.conv_norm_out(UpperCamelCase__ )
A_ = self.conv_act(UpperCamelCase__ )
A_ = self.conv_out(UpperCamelCase__ )
return sample
class A__ ( nn.Module ):
def __init__( self , UpperCamelCase__=3 , UpperCamelCase__=3 , UpperCamelCase__=("UpDecoderBlock2D",) , UpperCamelCase__=(64,) , UpperCamelCase__=2 , UpperCamelCase__=32 , UpperCamelCase__="silu" , UpperCamelCase__="group" , ) -> List[Any]:
'''simple docstring'''
super().__init__()
A_ = layers_per_block
A_ = nn.Convad(
UpperCamelCase__ , block_out_channels[-1] , kernel_size=3 , stride=1 , padding=1 , )
A_ = None
A_ = nn.ModuleList([] )
A_ = in_channels if norm_type == """spatial""" else None
# mid
A_ = UNetMidBlockaD(
in_channels=block_out_channels[-1] , resnet_eps=1e-6 , resnet_act_fn=UpperCamelCase__ , output_scale_factor=1 , resnet_time_scale_shift="""default""" if norm_type == """group""" else norm_type , attention_head_dim=block_out_channels[-1] , resnet_groups=UpperCamelCase__ , temb_channels=UpperCamelCase__ , )
# up
A_ = list(reversed(UpperCamelCase__ ) )
A_ = reversed_block_out_channels[0]
for i, up_block_type in enumerate(UpperCamelCase__ ):
A_ = output_channel
A_ = reversed_block_out_channels[i]
A_ = i == len(UpperCamelCase__ ) - 1
A_ = get_up_block(
UpperCamelCase__ , num_layers=self.layers_per_block + 1 , in_channels=UpperCamelCase__ , out_channels=UpperCamelCase__ , prev_output_channel=UpperCamelCase__ , add_upsample=not is_final_block , resnet_eps=1e-6 , resnet_act_fn=UpperCamelCase__ , resnet_groups=UpperCamelCase__ , attention_head_dim=UpperCamelCase__ , temb_channels=UpperCamelCase__ , resnet_time_scale_shift=UpperCamelCase__ , )
self.up_blocks.append(UpperCamelCase__ )
A_ = output_channel
# out
if norm_type == "spatial":
A_ = SpatialNorm(block_out_channels[0] , UpperCamelCase__ )
else:
A_ = nn.GroupNorm(num_channels=block_out_channels[0] , num_groups=UpperCamelCase__ , eps=1e-6 )
A_ = nn.SiLU()
A_ = nn.Convad(block_out_channels[0] , UpperCamelCase__ , 3 , padding=1 )
A_ = False
def snake_case_ ( self , UpperCamelCase__ , UpperCamelCase__=None ) -> Optional[Any]:
'''simple docstring'''
A_ = z
A_ = self.conv_in(UpperCamelCase__ )
A_ = next(iter(self.up_blocks.parameters() ) ).dtype
if self.training and self.gradient_checkpointing:
def create_custom_forward(UpperCamelCase__ ):
def custom_forward(*UpperCamelCase__ ):
return module(*UpperCamelCase__ )
return custom_forward
if is_torch_version(""">=""" , """1.11.0""" ):
# middle
A_ = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) , UpperCamelCase__ , UpperCamelCase__ , use_reentrant=UpperCamelCase__ )
A_ = sample.to(UpperCamelCase__ )
# up
for up_block in self.up_blocks:
A_ = torch.utils.checkpoint.checkpoint(
create_custom_forward(UpperCamelCase__ ) , UpperCamelCase__ , UpperCamelCase__ , use_reentrant=UpperCamelCase__ )
else:
# middle
A_ = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) , UpperCamelCase__ , UpperCamelCase__ )
A_ = sample.to(UpperCamelCase__ )
# up
for up_block in self.up_blocks:
A_ = torch.utils.checkpoint.checkpoint(create_custom_forward(UpperCamelCase__ ) , UpperCamelCase__ , UpperCamelCase__ )
else:
# middle
A_ = self.mid_block(UpperCamelCase__ , UpperCamelCase__ )
A_ = sample.to(UpperCamelCase__ )
# up
for up_block in self.up_blocks:
A_ = up_block(UpperCamelCase__ , UpperCamelCase__ )
# post-process
if latent_embeds is None:
A_ = self.conv_norm_out(UpperCamelCase__ )
else:
A_ = self.conv_norm_out(UpperCamelCase__ , UpperCamelCase__ )
A_ = self.conv_act(UpperCamelCase__ )
A_ = self.conv_out(UpperCamelCase__ )
return sample
class A__ ( nn.Module ):
def __init__( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__=None , UpperCamelCase__="random" , UpperCamelCase__=False , UpperCamelCase__=True ) -> str:
'''simple docstring'''
super().__init__()
A_ = n_e
A_ = vq_embed_dim
A_ = beta
A_ = legacy
A_ = nn.Embedding(self.n_e , self.vq_embed_dim )
self.embedding.weight.data.uniform_(-1.0 / self.n_e , 1.0 / self.n_e )
A_ = remap
if self.remap is not None:
self.register_buffer("""used""" , torch.tensor(np.load(self.remap ) ) )
A_ = self.used.shape[0]
A_ = unknown_index # "random" or "extra" or integer
if self.unknown_index == "extra":
A_ = self.re_embed
A_ = self.re_embed + 1
print(
f'''Remapping {self.n_e} indices to {self.re_embed} indices. '''
f'''Using {self.unknown_index} for unknown indices.''' )
else:
A_ = n_e
A_ = sane_index_shape
def snake_case_ ( self , UpperCamelCase__ ) -> Optional[Any]:
'''simple docstring'''
A_ = inds.shape
assert len(UpperCamelCase__ ) > 1
A_ = inds.reshape(ishape[0] , -1 )
A_ = self.used.to(UpperCamelCase__ )
A_ = (inds[:, :, None] == used[None, None, ...]).long()
A_ = match.argmax(-1 )
A_ = match.sum(2 ) < 1
if self.unknown_index == "random":
A_ = torch.randint(0 , self.re_embed , size=new[unknown].shape ).to(device=new.device )
else:
A_ = self.unknown_index
return new.reshape(UpperCamelCase__ )
def snake_case_ ( self , UpperCamelCase__ ) -> Dict:
'''simple docstring'''
A_ = inds.shape
assert len(UpperCamelCase__ ) > 1
A_ = inds.reshape(ishape[0] , -1 )
A_ = self.used.to(UpperCamelCase__ )
if self.re_embed > self.used.shape[0]: # extra token
A_ = 0 # simply set to zero
A_ = torch.gather(used[None, :][inds.shape[0] * [0], :] , 1 , UpperCamelCase__ )
return back.reshape(UpperCamelCase__ )
def snake_case_ ( self , UpperCamelCase__ ) -> str:
'''simple docstring'''
# reshape z -> (batch, height, width, channel) and flatten
A_ = z.permute(0 , 2 , 3 , 1 ).contiguous()
A_ = z.view(-1 , self.vq_embed_dim )
# distances from z to embeddings e_j (z - e)^2 = z^2 + e^2 - 2 e * z
A_ = torch.argmin(torch.cdist(UpperCamelCase__ , self.embedding.weight ) , dim=1 )
A_ = self.embedding(UpperCamelCase__ ).view(z.shape )
A_ = None
A_ = None
# compute loss for embedding
if not self.legacy:
A_ = self.beta * torch.mean((z_q.detach() - z) ** 2 ) + torch.mean((z_q - z.detach()) ** 2 )
else:
A_ = torch.mean((z_q.detach() - z) ** 2 ) + self.beta * torch.mean((z_q - z.detach()) ** 2 )
# preserve gradients
A_ = z + (z_q - z).detach()
# reshape back to match original input shape
A_ = z_q.permute(0 , 3 , 1 , 2 ).contiguous()
if self.remap is not None:
A_ = min_encoding_indices.reshape(z.shape[0] , -1 ) # add batch axis
A_ = self.remap_to_used(UpperCamelCase__ )
A_ = min_encoding_indices.reshape(-1 , 1 ) # flatten
if self.sane_index_shape:
A_ = min_encoding_indices.reshape(z_q.shape[0] , z_q.shape[2] , z_q.shape[3] )
return z_q, loss, (perplexity, min_encodings, min_encoding_indices)
def snake_case_ ( self , UpperCamelCase__ , UpperCamelCase__ ) -> Optional[int]:
'''simple docstring'''
# shape specifying (batch, height, width, channel)
if self.remap is not None:
A_ = indices.reshape(shape[0] , -1 ) # add batch axis
A_ = self.unmap_to_all(UpperCamelCase__ )
A_ = indices.reshape(-1 ) # flatten again
# get quantized latent vectors
A_ = self.embedding(UpperCamelCase__ )
if shape is not None:
A_ = z_q.view(UpperCamelCase__ )
# reshape back to match original input shape
A_ = z_q.permute(0 , 3 , 1 , 2 ).contiguous()
return z_q
class A__ ( _snake_case ):
def __init__( self , UpperCamelCase__ , UpperCamelCase__=False ) -> Dict:
'''simple docstring'''
A_ = parameters
A_ , A_ = torch.chunk(UpperCamelCase__ , 2 , dim=1 )
A_ = torch.clamp(self.logvar , -30.0 , 20.0 )
A_ = deterministic
A_ = torch.exp(0.5 * self.logvar )
A_ = torch.exp(self.logvar )
if self.deterministic:
A_ = A_ = torch.zeros_like(
self.mean , device=self.parameters.device , dtype=self.parameters.dtype )
def snake_case_ ( self , UpperCamelCase__ = None ) -> torch.FloatTensor:
'''simple docstring'''
# make sure sample is on the same device as the parameters and has same dtype
A_ = randn_tensor(
self.mean.shape , generator=UpperCamelCase__ , device=self.parameters.device , dtype=self.parameters.dtype )
A_ = self.mean + self.std * sample
return x
def snake_case_ ( self , UpperCamelCase__=None ) -> int:
'''simple docstring'''
if self.deterministic:
return torch.Tensor([0.0] )
else:
if other is None:
return 0.5 * torch.sum(torch.pow(self.mean , 2 ) + self.var - 1.0 - self.logvar , dim=[1, 2, 3] )
else:
return 0.5 * torch.sum(
torch.pow(self.mean - other.mean , 2 ) / other.var
+ self.var / other.var
- 1.0
- self.logvar
+ other.logvar , dim=[1, 2, 3] , )
def snake_case_ ( self , UpperCamelCase__ , UpperCamelCase__=[1, 2, 3] ) -> Optional[Any]:
'''simple docstring'''
if self.deterministic:
return torch.Tensor([0.0] )
A_ = np.log(2.0 * np.pi )
return 0.5 * torch.sum(logtwopi + self.logvar + torch.pow(sample - self.mean , 2 ) / self.var , dim=UpperCamelCase__ )
def snake_case_ ( self ) -> List[Any]:
'''simple docstring'''
return self.mean
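# --- Added illustrative sketch (hypothetical tensors). The quantizer above
# assigns each latent vector to its nearest codebook row via `torch.cdist`
# followed by `argmin`, which this miniature lookup reproduces.
def _demo_nearest_codebook() -> None:
    codebook = torch.tensor([[0.0, 0.0], [1.0, 1.0]])
    z_flat = torch.tensor([[0.9, 1.1], [0.1, -0.2]])
    indices = torch.argmin(torch.cdist(z_flat, codebook), dim=1)
    assert indices.tolist() == [1, 0]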
| 667 | 1 |
'''simple docstring'''
import sys
__lowerCamelCase = (
'''73167176531330624919225119674426574742355349194934'''
'''96983520312774506326239578318016984801869478851843'''
'''85861560789112949495459501737958331952853208805511'''
'''12540698747158523863050715693290963295227443043557'''
'''66896648950445244523161731856403098711121722383113'''
'''62229893423380308135336276614282806444486645238749'''
'''30358907296290491560440772390713810515859307960866'''
'''70172427121883998797908792274921901699720888093776'''
'''65727333001053367881220235421809751254540594752243'''
'''52584907711670556013604839586446706324415722155397'''
'''53697817977846174064955149290862569321978468622482'''
'''83972241375657056057490261407972968652414535100474'''
'''82166370484403199890008895243450658541227588666881'''
'''16427171479924442928230863465674813919123162824586'''
'''17866458359124566529476545682848912883142607690042'''
'''24219022671055626321111109370544217506941658960408'''
'''07198403850962455444362981230987879927244284909188'''
'''84580156166097919133875499200524063689912560717606'''
'''05886116467109405077541002256983155200055935729725'''
'''71636269561882670428252483600823257530420752963450'''
)
def UpperCAmelCase__ ( UpperCAmelCase__ = N ) -> int:
A_ = -sys.maxsize - 1
for i in range(len(UpperCAmelCase__ ) - 12 ):
A_ = 1
for j in range(13 ):
product *= int(n[i + j] )
if product > largest_product:
A_ = product
return largest_product
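# --- Added worked example. With a window of four digits instead of thirteen,
# the same scan over "12345" compares 1*2*3*4 = 24 with 2*3*4*5 = 120 and
# keeps the larger product.
def _demo_window_product() -> None:
    digits, best = "12345", 0
    for i in range(len(digits) - 3):
        product = 1
        for j in range(4):
            product *= int(digits[i + j])
        best = max(best, product)
    assert best == 120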
if __name__ == "__main__":
print(f"""{solution() = }""")
| 667 |
'''simple docstring'''
import argparse
import json
import os
from collections import OrderedDict
import torch
from transformers import LukeConfig, LukeForMaskedLM, MLukeTokenizer, XLMRobertaTokenizer
from transformers.tokenization_utils_base import AddedToken
@torch.no_grad()
def UpperCAmelCase__ ( UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__ ) -> List[Any]:
# Load configuration defined in the metadata file
with open(UpperCAmelCase__ ) as metadata_file:
A_ = json.load(UpperCAmelCase__ )
A_ = LukeConfig(use_entity_aware_attention=UpperCAmelCase__, **metadata["""model_config"""] )
# Load in the weights from the checkpoint_path
A_ = torch.load(UpperCAmelCase__, map_location="""cpu""" )["""module"""]
# Load the entity vocab file
A_ = load_original_entity_vocab(UpperCAmelCase__ )
# add an entry for [MASK2]
A_ = max(entity_vocab.values() ) + 1
config.entity_vocab_size += 1
A_ = XLMRobertaTokenizer.from_pretrained(metadata["""model_config"""]["""bert_model_name"""] )
# Add special tokens to the token vocabulary for downstream tasks
A_ = AddedToken("""<ent>""", lstrip=UpperCAmelCase__, rstrip=UpperCAmelCase__ )
A_ = AddedToken("""<ent2>""", lstrip=UpperCAmelCase__, rstrip=UpperCAmelCase__ )
tokenizer.add_special_tokens({"""additional_special_tokens""": [entity_token_a, entity_token_a]} )
config.vocab_size += 2
print(F'''Saving tokenizer to {pytorch_dump_folder_path}''' )
tokenizer.save_pretrained(UpperCAmelCase__ )
with open(os.path.join(UpperCAmelCase__, """tokenizer_config.json""" ), """r""" ) as f:
A_ = json.load(UpperCAmelCase__ )
A_ = """MLukeTokenizer"""
with open(os.path.join(UpperCAmelCase__, """tokenizer_config.json""" ), """w""" ) as f:
json.dump(UpperCAmelCase__, UpperCAmelCase__ )
with open(os.path.join(UpperCAmelCase__, MLukeTokenizer.vocab_files_names["""entity_vocab_file"""] ), """w""" ) as f:
json.dump(UpperCAmelCase__, UpperCAmelCase__ )
A_ = MLukeTokenizer.from_pretrained(UpperCAmelCase__ )
# Initialize the embeddings of the special tokens
A_ = tokenizer.convert_tokens_to_ids(["""@"""] )[0]
A_ = tokenizer.convert_tokens_to_ids(["""#"""] )[0]
A_ = state_dict["""embeddings.word_embeddings.weight"""]
A_ = word_emb[ent_init_index].unsqueeze(0 )
A_ = word_emb[enta_init_index].unsqueeze(0 )
A_ = torch.cat([word_emb, ent_emb, enta_emb] )
# add special tokens for 'entity_predictions.bias'
for bias_name in ["lm_head.decoder.bias", "lm_head.bias"]:
A_ = state_dict[bias_name]
A_ = decoder_bias[ent_init_index].unsqueeze(0 )
A_ = decoder_bias[enta_init_index].unsqueeze(0 )
A_ = torch.cat([decoder_bias, ent_decoder_bias, enta_decoder_bias] )
# Initialize the query layers of the entity-aware self-attention mechanism
for layer_index in range(config.num_hidden_layers ):
for matrix_name in ["query.weight", "query.bias"]:
A_ = F'''encoder.layer.{layer_index}.attention.self.'''
A_ = state_dict[prefix + matrix_name]
A_ = state_dict[prefix + matrix_name]
A_ = state_dict[prefix + matrix_name]
# Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
A_ = state_dict["""entity_embeddings.entity_embeddings.weight"""]
A_ = entity_emb[entity_vocab["""[MASK]"""]].unsqueeze(0 )
A_ = torch.cat([entity_emb, entity_mask_emb] )
# add [MASK2] for 'entity_predictions.bias'
A_ = state_dict["""entity_predictions.bias"""]
A_ = entity_prediction_bias[entity_vocab["""[MASK]"""]].unsqueeze(0 )
A_ = torch.cat([entity_prediction_bias, entity_mask_bias] )
A_ = LukeForMaskedLM(config=UpperCAmelCase__ ).eval()
state_dict.pop("""entity_predictions.decoder.weight""" )
state_dict.pop("""lm_head.decoder.weight""" )
state_dict.pop("""lm_head.decoder.bias""" )
A_ = OrderedDict()
for key, value in state_dict.items():
if not (key.startswith("""lm_head""" ) or key.startswith("""entity_predictions""" )):
A_ = state_dict[key]
else:
A_ = state_dict[key]
A_ , A_ = model.load_state_dict(UpperCAmelCase__, strict=UpperCAmelCase__ )
if set(UpperCAmelCase__ ) != {"luke.embeddings.position_ids"}:
raise ValueError(F'''Unexpected unexpected_keys: {unexpected_keys}''' )
if set(UpperCAmelCase__ ) != {
"lm_head.decoder.weight",
"lm_head.decoder.bias",
"entity_predictions.decoder.weight",
}:
raise ValueError(F'''Unexpected missing_keys: {missing_keys}''' )
model.tie_weights()
assert (model.luke.embeddings.word_embeddings.weight == model.lm_head.decoder.weight).all()
assert (model.luke.entity_embeddings.entity_embeddings.weight == model.entity_predictions.decoder.weight).all()
# Check outputs
A_ = MLukeTokenizer.from_pretrained(UpperCAmelCase__, task="""entity_classification""" )
A_ = """ISO 639-3 uses the code fas for the dialects spoken across Iran and アフガニスタン (Afghanistan)."""
A_ = (0, 9)
A_ = tokenizer(UpperCAmelCase__, entity_spans=[span], return_tensors="""pt""" )
A_ = model(**UpperCAmelCase__ )
# Verify word hidden states
if model_size == "large":
raise NotImplementedError
else: # base
A_ = torch.Size((1, 33, 7_68) )
A_ = torch.tensor([[0.0_892, 0.0_596, -0.2_819], [0.0_134, 0.1_199, 0.0_573], [-0.0_169, 0.0_927, 0.0_644]] )
if not (outputs.last_hidden_state.shape == expected_shape):
raise ValueError(
F'''Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}''' )
if not torch.allclose(outputs.last_hidden_state[0, :3, :3], UpperCAmelCase__, atol=1e-4 ):
raise ValueError
# Verify entity hidden states
if model_size == "large":
raise NotImplementedError
else: # base
A_ = torch.Size((1, 1, 7_68) )
A_ = torch.tensor([[-0.1_482, 0.0_609, 0.0_322]] )
if not (outputs.entity_last_hidden_state.shape == expected_shape):
raise ValueError(
F'''Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is'''
F''' {expected_shape}''' )
if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3], UpperCAmelCase__, atol=1e-4 ):
raise ValueError
# Verify masked word/entity prediction
A_ = MLukeTokenizer.from_pretrained(UpperCAmelCase__ )
A_ = """Tokyo is the capital of <mask>."""
A_ = (24, 30)
A_ = tokenizer(UpperCAmelCase__, entity_spans=[span], return_tensors="""pt""" )
A_ = model(**UpperCAmelCase__ )
A_ = encoding["""input_ids"""][0].tolist()
A_ = input_ids.index(tokenizer.convert_tokens_to_ids("""<mask>""" ) )
A_ = outputs.logits[0][mask_position_id].argmax(dim=-1 )
assert "Japan" == tokenizer.decode(UpperCAmelCase__ )
A_ = outputs.entity_logits[0][0].argmax().item()
A_ = [
entity for entity, entity_id in tokenizer.entity_vocab.items() if entity_id == predicted_entity_id
]
assert [e for e in multilingual_predicted_entities if e.startswith("""en:""" )][0] == "en:Japan"
# Finally, save our PyTorch model and tokenizer
print("""Saving PyTorch model to {}""".format(UpperCAmelCase__ ) )
model.save_pretrained(UpperCAmelCase__ )
def UpperCAmelCase__ ( UpperCAmelCase__ ) -> int:
A_ = ["""[MASK]""", """[PAD]""", """[UNK]"""]
A_ = [json.loads(UpperCAmelCase__ ) for line in open(UpperCAmelCase__ )]
A_ = {}
for entry in data:
A_ = entry["""id"""]
for entity_name, language in entry["entities"]:
if entity_name in SPECIAL_TOKENS:
A_ = entity_id
break
A_ = F'''{language}:{entity_name}'''
A_ = entity_id
return new_mapping
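# --- Added illustrative sketch (hypothetical vocabulary entry). Non-special
# entities are keyed as "<language>:<name>", so the English and Japanese
# aliases of the same entity share one id under distinct keys.
def _demo_entity_mapping() -> None:
    entry = {"id": 7, "entities": [["Japan", "en"], ["日本", "ja"]]}
    mapping = {f"{lang}:{name}": entry["id"] for name, lang in entry["entities"]}
    assert mapping == {"en:Japan": 7, "ja:日本": 7}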
if __name__ == "__main__":
__lowerCamelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''--checkpoint_path''', type=str, help='''Path to a pytorch_model.bin file.''')
parser.add_argument(
'''--metadata_path''', default=None, type=str, help='''Path to a metadata.json file, defining the configuration.'''
)
parser.add_argument(
'''--entity_vocab_path''',
default=None,
type=str,
help='''Path to an entity_vocab.tsv file, containing the entity vocabulary.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to where to dump the output PyTorch model.'''
)
parser.add_argument(
'''--model_size''', default='''base''', type=str, choices=['''base''', '''large'''], help='''Size of the model to be converted.'''
)
__lowerCamelCase = parser.parse_args()
convert_luke_checkpoint(
args.checkpoint_path,
args.metadata_path,
args.entity_vocab_path,
args.pytorch_dump_folder_path,
args.model_size,
)
| 667 | 1 |
'''simple docstring'''
from __future__ import annotations
# This is the precision for this function which can be altered.
# It is recommended for users to keep this number greater than or equal to 10.
__lowerCamelCase = 10
def UpperCAmelCase__ ( UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__ ) -> int:
for i in range(UpperCAmelCase__, UpperCAmelCase__ ):
if array[i] == target:
return i
return -1
def UpperCAmelCase__ ( UpperCAmelCase__, UpperCAmelCase__ ) -> int:
A_ = 0
A_ = len(UpperCAmelCase__ )
while left <= right:
if right - left < precision:
return lin_search(UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__ )
A_ = (left + right) // 3 + 1
A_ = 2 * (left + right) // 3 + 1
if array[one_third] == target:
return one_third
elif array[two_third] == target:
return two_third
elif target < array[one_third]:
A_ = one_third - 1
elif array[two_third] < target:
A_ = two_third + 1
else:
A_ = one_third + 1
A_ = two_third - 1
else:
return -1
def UpperCAmelCase__ ( UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__ ) -> int:
if left < right:
if right - left < precision:
return lin_search(UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__ )
A_ = (left + right) // 3 + 1
A_ = 2 * (left + right) // 3 + 1
if array[one_third] == target:
return one_third
elif array[two_third] == target:
return two_third
elif target < array[one_third]:
return rec_ternary_search(UpperCAmelCase__, one_third - 1, UpperCAmelCase__, UpperCAmelCase__ )
elif array[two_third] < target:
return rec_ternary_search(two_third + 1, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__ )
else:
return rec_ternary_search(one_third + 1, two_third - 1, UpperCAmelCase__, UpperCAmelCase__ )
else:
return -1
if __name__ == "__main__":
import doctest
doctest.testmod()
__lowerCamelCase = input('''Enter numbers separated by comma:\n''').strip()
__lowerCamelCase = [int(item.strip()) for item in user_input.split(''',''')]
assert collection == sorted(collection), f"List must be ordered.\n{collection}."
__lowerCamelCase = int(input('''Enter the number to be found in the list:\n''').strip())
__lowerCamelCase = ite_ternary_search(collection, target)
__lowerCamelCase = rec_ternary_search(0, len(collection) - 1, collection, target)
if resulta != -1:
print(f"""Iterative search: {target} found at positions: {resulta}""")
print(f"""Recursive search: {target} found at positions: {resulta}""")
else:
print('''Not found''')
| 667 |
'''simple docstring'''
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class A__ ( _snake_case ):
lowercase = "ClapFeatureExtractor"
lowercase = ("RobertaTokenizer", "RobertaTokenizerFast")
def __init__( self , UpperCamelCase__ , UpperCamelCase__ ) -> Tuple:
'''simple docstring'''
super().__init__(UpperCamelCase__ , UpperCamelCase__ )
def __call__( self , UpperCamelCase__=None , UpperCamelCase__=None , UpperCamelCase__=None , **UpperCamelCase__ ) -> Tuple:
'''simple docstring'''
A_ = kwargs.pop("""sampling_rate""" , UpperCamelCase__ )
if text is None and audios is None:
raise ValueError("""You have to specify either text or audios. Both cannot be none.""" )
if text is not None:
A_ = self.tokenizer(UpperCamelCase__ , return_tensors=UpperCamelCase__ , **UpperCamelCase__ )
if audios is not None:
A_ = self.feature_extractor(
UpperCamelCase__ , sampling_rate=UpperCamelCase__ , return_tensors=UpperCamelCase__ , **UpperCamelCase__ )
if text is not None and audios is not None:
A_ = audio_features.input_features
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**UpperCamelCase__ ) , tensor_type=UpperCamelCase__ )
def snake_case_ ( self , *UpperCamelCase__ , **UpperCamelCase__ ) -> Optional[int]:
'''simple docstring'''
return self.tokenizer.batch_decode(*UpperCamelCase__ , **UpperCamelCase__ )
def snake_case_ ( self , *UpperCamelCase__ , **UpperCamelCase__ ) -> str:
'''simple docstring'''
return self.tokenizer.decode(*UpperCamelCase__ , **UpperCamelCase__ )
@property
def snake_case_ ( self ) -> int:
'''simple docstring'''
A_ = self.tokenizer.model_input_names
A_ = self.feature_extractor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + feature_extractor_input_names ) )
| 667 | 1 |
'''simple docstring'''
from collections import defaultdict
from typing import Optional
from ..image_utils import load_image
from ..utils import (
add_end_docstrings,
is_torch_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, ChunkPipeline
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_MASK_GENERATION_MAPPING
__lowerCamelCase = logging.get_logger(__name__)
@add_end_docstrings(_snake_case )
class A__ ( _snake_case ):
def __init__( self , **UpperCamelCase__ ) -> Tuple:
'''simple docstring'''
super().__init__(**UpperCamelCase__ )
requires_backends(self , """vision""" )
requires_backends(self , """torch""" )
if self.framework != "pt":
raise ValueError(f'''The {self.__class__} is only available in PyTorch.''' )
self.check_model_type(UpperCamelCase__ )
def snake_case_ ( self , **UpperCamelCase__ ) -> Optional[int]:
'''simple docstring'''
A_ = {}
A_ = {}
A_ = {}
# preprocess args
if "points_per_batch" in kwargs:
A_ = kwargs["""points_per_batch"""]
if "points_per_crop" in kwargs:
A_ = kwargs["""points_per_crop"""]
if "crops_n_layers" in kwargs:
A_ = kwargs["""crops_n_layers"""]
if "crop_overlap_ratio" in kwargs:
A_ = kwargs["""crop_overlap_ratio"""]
if "crop_n_points_downscale_factor" in kwargs:
A_ = kwargs["""crop_n_points_downscale_factor"""]
# postprocess args
if "pred_iou_thresh" in kwargs:
A_ = kwargs["""pred_iou_thresh"""]
if "stability_score_offset" in kwargs:
A_ = kwargs["""stability_score_offset"""]
if "mask_threshold" in kwargs:
A_ = kwargs["""mask_threshold"""]
if "stability_score_thresh" in kwargs:
A_ = kwargs["""stability_score_thresh"""]
if "crops_nms_thresh" in kwargs:
A_ = kwargs["""crops_nms_thresh"""]
if "output_rle_mask" in kwargs:
A_ = kwargs["""output_rle_mask"""]
if "output_bboxes_mask" in kwargs:
A_ = kwargs["""output_bboxes_mask"""]
return preprocess_kwargs, forward_params, postprocess_kwargs
def __call__( self , UpperCamelCase__ , *UpperCamelCase__ , UpperCamelCase__=None , UpperCamelCase__=None , **UpperCamelCase__ ) -> List[str]:
'''simple docstring'''
return super().__call__(UpperCamelCase__ , *UpperCamelCase__ , num_workers=UpperCamelCase__ , batch_size=UpperCamelCase__ , **UpperCamelCase__ )
def snake_case_ ( self , UpperCamelCase__ , UpperCamelCase__=64 , UpperCamelCase__ = 0 , UpperCamelCase__ = 512 / 1500 , UpperCamelCase__ = 32 , UpperCamelCase__ = 1 , ) -> List[Any]:
'''simple docstring'''
A_ = load_image(UpperCamelCase__ )
A_ = self.image_processor.size["""longest_edge"""]
A_ , A_ , A_ , A_ = self.image_processor.generate_crop_boxes(
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
A_ = self.image_processor(images=UpperCamelCase__ , return_tensors="""pt""" )
with self.device_placement():
if self.framework == "pt":
A_ = self.get_inference_context()
with inference_context():
A_ = self._ensure_tensor_on_device(UpperCamelCase__ , device=self.device )
A_ = self.model.get_image_embeddings(model_inputs.pop("""pixel_values""" ) )
A_ = image_embeddings
A_ = grid_points.shape[1]
A_ = points_per_batch if points_per_batch is not None else n_points
if points_per_batch <= 0:
raise ValueError(
"""Cannot have points_per_batch<=0. Must be >=1 to returned batched outputs. """
"""To return all points at once, set points_per_batch to None""" )
for i in range(0 , UpperCamelCase__ , UpperCamelCase__ ):
A_ = grid_points[:, i : i + points_per_batch, :, :]
A_ = input_labels[:, i : i + points_per_batch]
A_ = i == n_points - points_per_batch
yield {
"input_points": batched_points,
"input_labels": labels,
"input_boxes": crop_boxes,
"is_last": is_last,
**model_inputs,
}
def snake_case_ ( self , UpperCamelCase__ , UpperCamelCase__=0.88 , UpperCamelCase__=0.95 , UpperCamelCase__=0 , UpperCamelCase__=1 , ) -> Optional[Any]:
'''simple docstring'''
A_ = model_inputs.pop("""input_boxes""" )
A_ = model_inputs.pop("""is_last""" )
A_ = model_inputs.pop("""original_sizes""" ).tolist()
A_ = model_inputs.pop("""reshaped_input_sizes""" ).tolist()
A_ = self.model(**UpperCamelCase__ )
# post processing happens here in order to avoid CPU GPU copies of ALL the masks
A_ = model_outputs["""pred_masks"""]
A_ = self.image_processor.post_process_masks(
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , binarize=UpperCamelCase__ )
A_ = model_outputs["""iou_scores"""]
A_ , A_ , A_ = self.image_processor.filter_masks(
masks[0] , iou_scores[0] , original_sizes[0] , input_boxes[0] , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , )
return {
"masks": masks,
"is_last": is_last,
"boxes": boxes,
"iou_scores": iou_scores,
}
def snake_case_ ( self , UpperCamelCase__ , UpperCamelCase__=False , UpperCamelCase__=False , UpperCamelCase__=0.7 , ) -> str:
'''simple docstring'''
A_ = []
A_ = []
A_ = []
for model_output in model_outputs:
all_scores.append(model_output.pop("""iou_scores""" ) )
all_masks.extend(model_output.pop("""masks""" ) )
all_boxes.append(model_output.pop("""boxes""" ) )
A_ = torch.cat(UpperCamelCase__ )
A_ = torch.cat(UpperCamelCase__ )
A_ , A_ , A_ , A_ = self.image_processor.post_process_for_mask_generation(
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
A_ = defaultdict(UpperCamelCase__ )
for output in model_outputs:
for k, v in output.items():
extra[k].append(UpperCamelCase__ )
A_ = {}
if output_rle_mask:
A_ = rle_mask
if output_bboxes_mask:
A_ = bounding_boxes
return {"masks": output_masks, "scores": iou_scores, **optional, **extra}
| 667 |
'''simple docstring'''
import numpy as np
from cva import COLOR_BGR2GRAY, cvtColor, imread
from numpy import array, uinta
from PIL import Image
from digital_image_processing import change_contrast as cc
from digital_image_processing import convert_to_negative as cn
from digital_image_processing import sepia as sp
from digital_image_processing.dithering import burkes as bs
from digital_image_processing.edge_detection import canny
from digital_image_processing.filters import convolve as conv
from digital_image_processing.filters import gaussian_filter as gg
from digital_image_processing.filters import local_binary_pattern as lbp
from digital_image_processing.filters import median_filter as med
from digital_image_processing.filters import sobel_filter as sob
from digital_image_processing.resize import resize as rs
__lowerCamelCase = imread(r'''digital_image_processing/image_data/lena_small.jpg''')
__lowerCamelCase = cvtColor(img, COLOR_BGR2GRAY)
def UpperCAmelCase__ ( ) -> Dict:
A_ = cn.convert_to_negative(UpperCAmelCase__ )
# assert negative_img array for at least one True
assert negative_img.any()
def UpperCAmelCase__ ( ) -> List[Any]:
with Image.open("""digital_image_processing/image_data/lena_small.jpg""" ) as img:
# Work around assertion for response
assert str(cc.change_contrast(UpperCAmelCase__, 1_10 ) ).startswith(
"""<PIL.Image.Image image mode=RGB size=100x100 at""" )
def UpperCAmelCase__ ( ) -> str:
A_ = canny.gen_gaussian_kernel(9, sigma=1.4 )
# Assert ambiguous array
assert resp.all()
def UpperCAmelCase__ ( ) -> Union[str, Any]:
A_ = imread("""digital_image_processing/image_data/lena_small.jpg""", 0 )
# assert ambiguous array for all == True
assert canny_img.all()
A_ = canny.canny(UpperCAmelCase__ )
# assert canny array for at least one True
assert canny_array.any()
def UpperCAmelCase__ ( ) -> Dict:
assert gg.gaussian_filter(UpperCAmelCase__, 5, sigma=0.9 ).all()
def UpperCAmelCase__ ( ) -> int:
# laplace diagonals
A_ = array([[0.25, 0.5, 0.25], [0.5, -3, 0.5], [0.25, 0.5, 0.25]] )
A_ = conv.img_convolve(UpperCAmelCase__, UpperCAmelCase__ ).astype(UpperCAmelCase__ )
assert res.any()
def UpperCAmelCase__ ( ) -> List[Any]:
assert med.median_filter(UpperCAmelCase__, 3 ).any()
def UpperCAmelCase__ ( ) -> List[Any]:
A_ , A_ = sob.sobel_filter(UpperCAmelCase__ )
assert grad.any() and theta.any()
def UpperCAmelCase__ ( ) -> List[str]:
A_ = sp.make_sepia(UpperCAmelCase__, 20 )
assert sepia.all()
def UpperCAmelCase__ ( UpperCAmelCase__ = "digital_image_processing/image_data/lena_small.jpg" ) -> List[Any]:
A_ = bs.Burkes(imread(UpperCAmelCase__, 1 ), 1_20 )
burkes.process()
assert burkes.output_img.any()
def UpperCAmelCase__ ( UpperCAmelCase__ = "digital_image_processing/image_data/lena_small.jpg", ) -> Optional[int]:
A_ = rs.NearestNeighbour(imread(UpperCAmelCase__, 1 ), 4_00, 2_00 )
nn.process()
assert nn.output.any()
def UpperCAmelCase__ ( ) -> Optional[int]:
A_ = """digital_image_processing/image_data/lena.jpg"""
# Reading the image and converting it to grayscale.
A_ = imread(UpperCAmelCase__, 0 )
    # Test that get_neighbors_pixel() returns a neighborhood (not None)
A_ = 0
A_ = 0
A_ = image[x_coordinate][y_coordinate]
A_ = lbp.get_neighbors_pixel(
UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__ )
assert neighbors_pixels is not None
    # Test local_binary_pattern() over the whole image
# Create a numpy array as the same height and width of read image
A_ = np.zeros((image.shape[0], image.shape[1]) )
# Iterating through the image and calculating the local binary pattern value
# for each pixel.
for i in range(0, image.shape[0] ):
for j in range(0, image.shape[1] ):
A_ = lbp.local_binary_value(UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__ )
assert lbp_image.any()
| 667 | 1 |
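For reference, a minimal NumPy sketch of the local-binary-pattern idea the last test exercises: threshold the 8 neighbours of a pixel against its centre value and pack the comparison bits into one byte. This is an independent illustration, not the repository's exact implementation.

import numpy as np

def lbp_value(image: np.ndarray, x: int, y: int) -> int:
    # compare each of the 8 neighbours with the centre pixel, clockwise
    center = image[x, y]
    offsets = [(-1, -1), (-1, 0), (-1, 1), (0, 1), (1, 1), (1, 0), (1, -1), (0, -1)]
    value = 0
    for bit, (dx, dy) in enumerate(offsets):
        nx, ny = x + dx, y + dy
        inside = 0 <= nx < image.shape[0] and 0 <= ny < image.shape[1]
        if inside and image[nx, ny] >= center:
            value |= 1 << bit
    return value

img = np.array([[10, 20, 30], [40, 50, 60], [70, 80, 90]])
print(lbp_value(img, 1, 1))  # 120: bits set only where the neighbour >= 50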
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__lowerCamelCase = {
'''configuration_xlm_roberta_xl''': [
'''XLM_ROBERTA_XL_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''XLMRobertaXLConfig''',
'''XLMRobertaXLOnnxConfig''',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase = [
'''XLM_ROBERTA_XL_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''XLMRobertaXLForCausalLM''',
'''XLMRobertaXLForMaskedLM''',
'''XLMRobertaXLForMultipleChoice''',
'''XLMRobertaXLForQuestionAnswering''',
'''XLMRobertaXLForSequenceClassification''',
'''XLMRobertaXLForTokenClassification''',
'''XLMRobertaXLModel''',
'''XLMRobertaXLPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_xlm_roberta_xl import (
XLM_ROBERTA_XL_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLMRobertaXLConfig,
XLMRobertaXLOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm_roberta_xl import (
XLM_ROBERTA_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMRobertaXLForCausalLM,
XLMRobertaXLForMaskedLM,
XLMRobertaXLForMultipleChoice,
XLMRobertaXLForQuestionAnswering,
XLMRobertaXLForSequenceClassification,
XLMRobertaXLForTokenClassification,
XLMRobertaXLModel,
XLMRobertaXLPreTrainedModel,
)
else:
import sys
__lowerCamelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
| 667 |
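The init above defers the heavy torch imports until an attribute is first requested. A stand-alone sketch of the same lazy-import idea via module-level __getattr__ (PEP 562), independent of transformers' _LazyModule; the stdlib modules here are placeholders for the real submodules.

import importlib

_import_structure = {"math": ["sqrt"], "json": ["dumps"]}
_attr_to_module = {attr: mod for mod, attrs in _import_structure.items() for attr in attrs}

def __getattr__(name):
    # called when `import lazy_mod; lazy_mod.sqrt` misses in the module dict:
    # resolve the attribute on first access, then cache it in globals()
    if name in _attr_to_module:
        value = getattr(importlib.import_module(_attr_to_module[name]), name)
        globals()[name] = value
        return value
    raise AttributeError(name)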
'''simple docstring'''
def UpperCAmelCase__ ( UpperCAmelCase__, UpperCAmelCase__ ) -> float:
_validate_point(UpperCAmelCase__ )
_validate_point(UpperCAmelCase__ )
if len(UpperCAmelCase__ ) != len(UpperCAmelCase__ ):
raise ValueError("""Both points must be in the same n-dimensional space""" )
return float(sum(abs(a - b ) for a, b in zip(UpperCAmelCase__, UpperCAmelCase__ ) ) )
def UpperCAmelCase__ ( UpperCAmelCase__ ) -> None:
if point:
if isinstance(UpperCAmelCase__, UpperCAmelCase__ ):
for item in point:
if not isinstance(UpperCAmelCase__, (int, float) ):
A_ = (
"""Expected a list of numbers as input, found """
F'''{type(UpperCAmelCase__ ).__name__}'''
)
raise TypeError(UpperCAmelCase__ )
else:
A_ = F'''Expected a list of numbers as input, found {type(UpperCAmelCase__ ).__name__}'''
raise TypeError(UpperCAmelCase__ )
else:
raise ValueError("""Missing an input""" )
def UpperCAmelCase__ ( UpperCAmelCase__, UpperCAmelCase__ ) -> float:
_validate_point(UpperCAmelCase__ )
_validate_point(UpperCAmelCase__ )
if len(UpperCAmelCase__ ) != len(UpperCAmelCase__ ):
raise ValueError("""Both points must be in the same n-dimensional space""" )
return float(sum(abs(x - y ) for x, y in zip(UpperCAmelCase__, UpperCAmelCase__ ) ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 667 | 1 |
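Both functions above compute the Manhattan (L1) distance. A quick usage illustration, restated with a descriptive name since every def in this file shares one identifier:

def manhattan_distance(point_a: list, point_b: list) -> float:
    # sum of absolute coordinate differences
    return float(sum(abs(a - b) for a, b in zip(point_a, point_b)))

print(manhattan_distance([1, 1], [9, 9]))      # 16.0
print(manhattan_distance([1.5, 2.5], [0, 0]))  # 4.0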
'''simple docstring'''
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class A__ ( _snake_case ):
lowercase = ["image_processor", "tokenizer"]
lowercase = "LayoutLMv3ImageProcessor"
lowercase = ("LayoutLMv3Tokenizer", "LayoutLMv3TokenizerFast")
def __init__( self , UpperCamelCase__=None , UpperCamelCase__=None , **UpperCamelCase__ ) -> List[Any]:
'''simple docstring'''
A_ = None
if "feature_extractor" in kwargs:
warnings.warn(
"""The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"""
""" instead.""" , UpperCamelCase__ , )
A_ = kwargs.pop("""feature_extractor""" )
A_ = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("""You need to specify an `image_processor`.""" )
if tokenizer is None:
raise ValueError("""You need to specify a `tokenizer`.""" )
super().__init__(UpperCamelCase__ , UpperCamelCase__ )
def __call__( self , UpperCamelCase__ , UpperCamelCase__ = None , UpperCamelCase__ = None , UpperCamelCase__ = None , UpperCamelCase__ = None , UpperCamelCase__ = True , UpperCamelCase__ = False , UpperCamelCase__ = None , UpperCamelCase__ = None , UpperCamelCase__ = 0 , UpperCamelCase__ = None , UpperCamelCase__ = None , UpperCamelCase__ = None , UpperCamelCase__ = False , UpperCamelCase__ = False , UpperCamelCase__ = False , UpperCamelCase__ = False , UpperCamelCase__ = True , UpperCamelCase__ = None , **UpperCamelCase__ , ) -> BatchEncoding:
'''simple docstring'''
# verify input
if self.image_processor.apply_ocr and (boxes is not None):
raise ValueError(
"""You cannot provide bounding boxes if you initialized the image processor with apply_ocr set to True.""" )
if self.image_processor.apply_ocr and (word_labels is not None):
raise ValueError(
"""You cannot provide word labels if you initialized the image processor with apply_ocr set to True.""" )
# first, apply the image processor
A_ = self.image_processor(images=UpperCamelCase__ , return_tensors=UpperCamelCase__ )
# second, apply the tokenizer
if text is not None and self.image_processor.apply_ocr and text_pair is None:
if isinstance(UpperCamelCase__ , UpperCamelCase__ ):
A_ = [text] # add batch dimension (as the image processor always adds a batch dimension)
A_ = features["""words"""]
A_ = self.tokenizer(
text=text if text is not None else features["""words"""] , text_pair=text_pair if text_pair is not None else None , boxes=boxes if boxes is not None else features["""boxes"""] , word_labels=UpperCamelCase__ , add_special_tokens=UpperCamelCase__ , padding=UpperCamelCase__ , truncation=UpperCamelCase__ , max_length=UpperCamelCase__ , stride=UpperCamelCase__ , pad_to_multiple_of=UpperCamelCase__ , return_token_type_ids=UpperCamelCase__ , return_attention_mask=UpperCamelCase__ , return_overflowing_tokens=UpperCamelCase__ , return_special_tokens_mask=UpperCamelCase__ , return_offsets_mapping=UpperCamelCase__ , return_length=UpperCamelCase__ , verbose=UpperCamelCase__ , return_tensors=UpperCamelCase__ , **UpperCamelCase__ , )
# add pixel values
A_ = features.pop("""pixel_values""" )
if return_overflowing_tokens is True:
A_ = self.get_overflowing_images(UpperCamelCase__ , encoded_inputs["""overflow_to_sample_mapping"""] )
A_ = images
return encoded_inputs
def snake_case_ ( self , UpperCamelCase__ , UpperCamelCase__ ) -> Union[str, Any]:
'''simple docstring'''
# in case there's an overflow, ensure each `input_ids` sample is mapped to its corresponding image
A_ = []
for sample_idx in overflow_to_sample_mapping:
images_with_overflow.append(images[sample_idx] )
if len(UpperCamelCase__ ) != len(UpperCamelCase__ ):
raise ValueError(
"""Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got"""
f''' {len(UpperCamelCase__ )} and {len(UpperCamelCase__ )}''' )
return images_with_overflow
def snake_case_ ( self , *UpperCamelCase__ , **UpperCamelCase__ ) -> List[Any]:
'''simple docstring'''
return self.tokenizer.batch_decode(*UpperCamelCase__ , **UpperCamelCase__ )
def snake_case_ ( self , *UpperCamelCase__ , **UpperCamelCase__ ) -> Optional[Any]:
'''simple docstring'''
return self.tokenizer.decode(*UpperCamelCase__ , **UpperCamelCase__ )
@property
def snake_case_ ( self ) -> Dict:
'''simple docstring'''
return ["input_ids", "bbox", "attention_mask", "pixel_values"]
@property
def snake_case_ ( self ) -> Optional[int]:
'''simple docstring'''
warnings.warn(
"""`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.""" , UpperCamelCase__ , )
return self.image_processor_class
@property
def snake_case_ ( self ) -> Dict:
'''simple docstring'''
warnings.warn(
"""`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.""" , UpperCamelCase__ , )
return self.image_processor
| 667 |
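A hedged usage sketch of the processor above, taking the no-OCR path (the __call__ guard shows that boxes may only be supplied when apply_ocr is off). The checkpoint name is an assumption, and the tokenizer expects boxes normalised to 0-1000 coordinates.

from PIL import Image
from transformers import LayoutLMv3ImageProcessor, LayoutLMv3Processor, LayoutLMv3TokenizerFast

processor = LayoutLMv3Processor(
    LayoutLMv3ImageProcessor(apply_ocr=False),
    LayoutLMv3TokenizerFast.from_pretrained("microsoft/layoutlmv3-base"),  # assumed checkpoint
)
image = Image.new("RGB", (224, 224), "white")
words = ["hello", "world"]
boxes = [[10, 10, 50, 30], [60, 10, 120, 30]]  # one 0-1000 box per word
encoding = processor(image, words, boxes=boxes, return_tensors="pt")
print(sorted(encoding.keys()))  # ['attention_mask', 'bbox', 'input_ids', 'pixel_values']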
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_beit import BeitImageProcessor
__lowerCamelCase = logging.get_logger(__name__)
class A__ ( _snake_case ):
def __init__( self , *UpperCamelCase__ , **UpperCamelCase__ ) -> None:
'''simple docstring'''
warnings.warn(
"""The class BeitFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"""
""" use BeitImageProcessor instead.""" , UpperCamelCase__ , )
super().__init__(*UpperCamelCase__ , **UpperCamelCase__ )
| 667 | 1 |
'''simple docstring'''
from transformers import DistilBertTokenizer, DistilBertTokenizerFast
from transformers.testing_utils import require_tokenizers, slow
from ..bert.test_tokenization_bert import BertTokenizationTest
@require_tokenizers
class A__ ( _snake_case ):
lowercase = DistilBertTokenizer
lowercase = DistilBertTokenizerFast
lowercase = True
@slow
def snake_case_ ( self ) -> Dict:
'''simple docstring'''
A_ = DistilBertTokenizer.from_pretrained("""distilbert-base-uncased""" )
A_ = tokenizer.encode("""sequence builders""" , add_special_tokens=UpperCamelCase__ )
A_ = tokenizer.encode("""multi-sequence build""" , add_special_tokens=UpperCamelCase__ )
A_ = tokenizer.build_inputs_with_special_tokens(UpperCamelCase__ )
A_ = tokenizer.build_inputs_with_special_tokens(UpperCamelCase__ , UpperCamelCase__ )
assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id]
assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [
tokenizer.sep_token_id
]
| 667 |
'''simple docstring'''
def UpperCAmelCase__ ( UpperCAmelCase__ ) -> bool:
if num < 0:
return False
A_ = num
A_ = 0
while num > 0:
A_ = rev_num * 10 + (num % 10)
num //= 10
return num_copy == rev_num
if __name__ == "__main__":
import doctest
doctest.testmod()
| 667 | 1 |
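The function above is a numeric palindrome test: reverse the digits arithmetically and compare with the original. Quick checks with an illustrative name, since defs in this file share one identifier:

def is_palindrome(num: int) -> bool:
    if num < 0:
        return False  # negatives are rejected outright
    rev, n = 0, num
    while n > 0:
        rev = rev * 10 + n % 10  # shift accumulated digits left, append the next one
        n //= 10
    return rev == num

assert is_palindrome(121)
assert not is_palindrome(-121)
assert not is_palindrome(10)  # a trailing zero can never mirror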
'''simple docstring'''
import copy
from typing import Dict, List, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
__lowerCamelCase = {
'''facebook/mask2former-swin-small-coco-instance''': (
'''https://huggingface.co/facebook/mask2former-swin-small-coco-instance/blob/main/config.json'''
)
# See all Mask2Former models at https://huggingface.co/models?filter=mask2former
}
__lowerCamelCase = logging.get_logger(__name__)
class A__ ( _snake_case ):
lowercase = "mask2former"
lowercase = ["swin"]
lowercase = {"hidden_size": "hidden_dim"}
def __init__( self , UpperCamelCase__ = None , UpperCamelCase__ = 256 , UpperCamelCase__ = 256 , UpperCamelCase__ = 256 , UpperCamelCase__ = 1024 , UpperCamelCase__ = "relu" , UpperCamelCase__ = 6 , UpperCamelCase__ = 10 , UpperCamelCase__ = 8 , UpperCamelCase__ = 0.0 , UpperCamelCase__ = 2048 , UpperCamelCase__ = False , UpperCamelCase__ = False , UpperCamelCase__ = 4 , UpperCamelCase__ = 255 , UpperCamelCase__ = 100 , UpperCamelCase__ = 0.1 , UpperCamelCase__ = 2.0 , UpperCamelCase__ = 5.0 , UpperCamelCase__ = 5.0 , UpperCamelCase__ = 12544 , UpperCamelCase__ = 3.0 , UpperCamelCase__ = 0.75 , UpperCamelCase__ = 0.02 , UpperCamelCase__ = 1.0 , UpperCamelCase__ = True , UpperCamelCase__ = [4, 8, 16, 32] , UpperCamelCase__ = None , **UpperCamelCase__ , ) -> Tuple:
'''simple docstring'''
if backbone_config is None:
logger.info("""`backbone_config` is `None`. Initializing the config with the default `Swin` backbone.""" )
A_ = CONFIG_MAPPING["""swin"""](
image_size=224 , in_channels=3 , patch_size=4 , embed_dim=96 , depths=[2, 2, 18, 2] , num_heads=[3, 6, 12, 24] , window_size=7 , drop_path_rate=0.3 , use_absolute_embeddings=UpperCamelCase__ , out_features=["""stage1""", """stage2""", """stage3""", """stage4"""] , )
if isinstance(UpperCamelCase__ , UpperCamelCase__ ):
A_ = backbone_config.pop("""model_type""" )
A_ = CONFIG_MAPPING[backbone_model_type]
A_ = config_class.from_dict(UpperCamelCase__ )
# verify that the backbone is supported
if backbone_config.model_type not in self.backbones_supported:
logger.warning_once(
f'''Backbone {backbone_config.model_type} is not a supported model and may not be compatible with Mask2Former. '''
f'''Supported model types: {",".join(self.backbones_supported )}''' )
A_ = backbone_config
A_ = feature_size
A_ = mask_feature_size
A_ = hidden_dim
A_ = encoder_feedforward_dim
A_ = activation_function
A_ = encoder_layers
A_ = decoder_layers
A_ = num_attention_heads
A_ = dropout
A_ = dim_feedforward
A_ = pre_norm
A_ = enforce_input_projection
A_ = common_stride
A_ = ignore_value
A_ = num_queries
A_ = no_object_weight
A_ = class_weight
A_ = mask_weight
A_ = dice_weight
A_ = train_num_points
A_ = oversample_ratio
A_ = importance_sample_ratio
A_ = init_std
A_ = init_xavier_std
A_ = use_auxiliary_loss
A_ = feature_strides
A_ = output_auxiliary_logits
A_ = decoder_layers
super().__init__(**UpperCamelCase__ )
@classmethod
def snake_case_ ( cls , UpperCamelCase__ , **UpperCamelCase__ ) -> Dict:
'''simple docstring'''
return cls(
backbone_config=UpperCamelCase__ , **UpperCamelCase__ , )
def snake_case_ ( self ) -> Dict[str, any]:
'''simple docstring'''
A_ = copy.deepcopy(self.__dict__ )
A_ = self.backbone_config.to_dict()
A_ = self.__class__.model_type
return output
| 667 |
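A hedged instantiation sketch: with no backbone_config, the constructor above falls back to the default Swin settings it logs. Attribute names follow the signature defaults (hidden_dim 256, num_queries 100).

from transformers import Mask2FormerConfig

config = Mask2FormerConfig()  # default Swin backbone, per the log message above
print(config.backbone_config.model_type)      # "swin"
print(config.hidden_dim, config.num_queries)  # 256 100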
'''simple docstring'''
__lowerCamelCase = range(2, 20 + 1)
__lowerCamelCase = [10**k for k in range(ks[-1] + 1)]
__lowerCamelCase = {}
def UpperCAmelCase__ ( UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__ ) -> Tuple:
A_ = sum(a_i[j] for j in range(UpperCAmelCase__, len(UpperCAmelCase__ ) ) )
A_ = sum(a_i[j] * base[j] for j in range(min(len(UpperCAmelCase__ ), UpperCAmelCase__ ) ) )
A_ , A_ = 0, 0
A_ = n - i
A_ = memo.get(UpperCAmelCase__ )
if sub_memo is not None:
A_ = sub_memo.get(UpperCAmelCase__ )
if jumps is not None and len(UpperCAmelCase__ ) > 0:
# find and make the largest jump without going over
A_ = -1
for _k in range(len(UpperCAmelCase__ ) - 1, -1, -1 ):
if jumps[_k][2] <= k and jumps[_k][1] <= max_dn:
A_ = _k
break
if max_jump >= 0:
A_ , A_ , A_ = jumps[max_jump]
# since the difference between jumps is cached, add c
A_ = diff + c
for j in range(min(UpperCAmelCase__, len(UpperCAmelCase__ ) ) ):
A_ , A_ = divmod(UpperCAmelCase__, 10 )
if new_c > 0:
add(UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__ )
else:
A_ = []
else:
A_ = {c: []}
A_ = sub_memo
if dn >= max_dn or c + diff >= base[k]:
return diff, dn
if k > ks[0]:
while True:
# keep doing smaller jumps
A_ , A_ = next_term(UpperCAmelCase__, k - 1, i + dn, UpperCAmelCase__ )
diff += _diff
dn += terms_jumped
if dn >= max_dn or c + diff >= base[k]:
break
else:
# would be too small a jump, just compute sequential terms instead
A_ , A_ = compute(UpperCAmelCase__, UpperCAmelCase__, i + dn, UpperCAmelCase__ )
diff += _diff
dn += terms_jumped
A_ = sub_memo[c]
# keep jumps sorted by # of terms skipped
A_ = 0
while j < len(UpperCAmelCase__ ):
if jumps[j][1] > dn:
break
j += 1
    # cache the jump for this digitsum(b) value and c
sub_memo[c].insert(UpperCAmelCase__, (diff, dn, k) )
return (diff, dn)
def UpperCAmelCase__ ( UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__ ) -> int:
if i >= n:
return 0, i
if k > len(UpperCAmelCase__ ):
a_i.extend([0 for _ in range(k - len(UpperCAmelCase__ ) )] )
# note: a_i -> b * 10^k + c
# ds_b -> digitsum(b)
# ds_c -> digitsum(c)
A_ = i
A_ , A_ , A_ = 0, 0, 0
for j in range(len(UpperCAmelCase__ ) ):
if j >= k:
ds_b += a_i[j]
else:
ds_c += a_i[j]
while i < n:
i += 1
A_ = ds_c + ds_b
diff += addend
A_ = 0
for j in range(UpperCAmelCase__ ):
A_ = a_i[j] + addend
A_ , A_ = divmod(UpperCAmelCase__, 10 )
ds_c += a_i[j]
if addend > 0:
break
if addend > 0:
add(UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__ )
return diff, i - start_i
def UpperCAmelCase__ ( UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__ ) -> str:
for j in range(UpperCAmelCase__, len(UpperCAmelCase__ ) ):
A_ = digits[j] + addend
if s >= 10:
A_ , A_ = divmod(UpperCAmelCase__, 10 )
A_ = addend // 10 + quotient
else:
A_ = s
A_ = addend // 10
if addend == 0:
break
while addend > 0:
A_ , A_ = divmod(UpperCAmelCase__, 10 )
digits.append(UpperCAmelCase__ )
def UpperCAmelCase__ ( UpperCAmelCase__ = 10**15 ) -> int:
A_ = [1]
A_ = 1
A_ = 0
while True:
A_ , A_ = next_term(UpperCAmelCase__, 20, i + dn, UpperCAmelCase__ )
dn += terms_jumped
if dn == n - i:
break
A_ = 0
for j in range(len(UpperCAmelCase__ ) ):
a_n += digits[j] * 10**j
return a_n
if __name__ == "__main__":
print(f"""{solution() = }""")
| 667 | 1 |
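The solution above walks the sequence a(n+1) = a(n) + digitsum(a(n)) by caching jump deltas instead of iterating 10**15 terms. A naive reference for small n, useful to sanity-check the memoised version:

def digit_sum_sequence(n: int) -> int:
    # a(1) = 1; each step adds the digit sum of the current term
    a = 1
    for _ in range(n - 1):
        a += sum(int(d) for d in str(a))
    return a

print([digit_sum_sequence(k) for k in range(1, 8)])  # [1, 2, 4, 8, 16, 23, 28]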
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import RoFormerConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForMultipleChoice,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerModel,
)
from transformers.models.roformer.modeling_tf_roformer import (
TFRoFormerSelfAttention,
TFRoFormerSinusoidalPositionalEmbedding,
)
class A__ :
def __init__( self , UpperCamelCase__ , UpperCamelCase__=13 , UpperCamelCase__=7 , UpperCamelCase__=True , UpperCamelCase__=True , UpperCamelCase__=True , UpperCamelCase__=True , UpperCamelCase__=99 , UpperCamelCase__=32 , UpperCamelCase__=2 , UpperCamelCase__=4 , UpperCamelCase__=37 , UpperCamelCase__="gelu" , UpperCamelCase__=0.1 , UpperCamelCase__=0.1 , UpperCamelCase__=512 , UpperCamelCase__=16 , UpperCamelCase__=2 , UpperCamelCase__=0.02 , UpperCamelCase__=3 , UpperCamelCase__=4 , UpperCamelCase__=None , ) -> int:
'''simple docstring'''
A_ = parent
A_ = 13
A_ = 7
A_ = True
A_ = True
A_ = True
A_ = True
A_ = 99
A_ = 32
A_ = 2
A_ = 4
A_ = 37
A_ = """gelu"""
A_ = 0.1
A_ = 0.1
A_ = 512
A_ = 16
A_ = 2
A_ = 0.02
A_ = 3
A_ = 4
A_ = None
def snake_case_ ( self ) -> Tuple:
'''simple docstring'''
A_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
A_ = None
if self.use_input_mask:
A_ = random_attention_mask([self.batch_size, self.seq_length] )
A_ = None
if self.use_token_type_ids:
A_ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
A_ = None
A_ = None
A_ = None
if self.use_labels:
A_ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
A_ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
A_ = ids_tensor([self.batch_size] , self.num_choices )
A_ = RoFormerConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , return_dict=A__ , )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def snake_case_ ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) -> Union[str, Any]:
'''simple docstring'''
A_ = TFRoFormerModel(config=A__ )
A_ = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
A_ = [input_ids, input_mask]
A_ = model(A__ )
A_ = model(A__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def snake_case_ ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) -> str:
'''simple docstring'''
A_ = True
A_ = TFRoFormerForCausalLM(config=A__ )
A_ = {
"""input_ids""": input_ids,
"""attention_mask""": input_mask,
"""token_type_ids""": token_type_ids,
}
A_ = model(A__ )["""logits"""]
self.parent.assertListEqual(
list(prediction_scores.numpy().shape ) , [self.batch_size, self.seq_length, self.vocab_size] )
def snake_case_ ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) -> Dict:
'''simple docstring'''
A_ = TFRoFormerForMaskedLM(config=A__ )
A_ = {
"""input_ids""": input_ids,
"""attention_mask""": input_mask,
"""token_type_ids""": token_type_ids,
}
A_ = model(A__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def snake_case_ ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) -> List[str]:
'''simple docstring'''
A_ = self.num_labels
A_ = TFRoFormerForSequenceClassification(config=A__ )
A_ = {
"""input_ids""": input_ids,
"""attention_mask""": input_mask,
"""token_type_ids""": token_type_ids,
}
A_ = model(A__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def snake_case_ ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) -> Any:
'''simple docstring'''
A_ = self.num_choices
A_ = TFRoFormerForMultipleChoice(config=A__ )
A_ = tf.tile(tf.expand_dims(A__ , 1 ) , (1, self.num_choices, 1) )
A_ = tf.tile(tf.expand_dims(A__ , 1 ) , (1, self.num_choices, 1) )
A_ = tf.tile(tf.expand_dims(A__ , 1 ) , (1, self.num_choices, 1) )
A_ = {
"""input_ids""": multiple_choice_inputs_ids,
"""attention_mask""": multiple_choice_input_mask,
"""token_type_ids""": multiple_choice_token_type_ids,
}
A_ = model(A__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def snake_case_ ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) -> List[str]:
'''simple docstring'''
A_ = self.num_labels
A_ = TFRoFormerForTokenClassification(config=A__ )
A_ = {
"""input_ids""": input_ids,
"""attention_mask""": input_mask,
"""token_type_ids""": token_type_ids,
}
A_ = model(A__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def snake_case_ ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) -> Tuple:
'''simple docstring'''
A_ = TFRoFormerForQuestionAnswering(config=A__ )
A_ = {
"""input_ids""": input_ids,
"""attention_mask""": input_mask,
"""token_type_ids""": token_type_ids,
}
A_ = model(A__ )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def snake_case_ ( self ) -> List[str]:
'''simple docstring'''
A_ = self.prepare_config_and_inputs()
        A_ , A_ , A_ , A_ , A_ , A_ , A_ = config_and_inputs
A_ = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_tf
class A__ ( _lowerCamelCase , _lowerCamelCase , unittest.TestCase ):
snake_case = (
(
TFRoFormerModel,
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerForMultipleChoice,
)
if is_tf_available()
else ()
)
snake_case = (
{
"feature-extraction": TFRoFormerModel,
"fill-mask": TFRoFormerForMaskedLM,
"question-answering": TFRoFormerForQuestionAnswering,
"text-classification": TFRoFormerForSequenceClassification,
"text-generation": TFRoFormerForCausalLM,
"token-classification": TFRoFormerForTokenClassification,
"zero-shot": TFRoFormerForSequenceClassification,
}
if is_tf_available()
else {}
)
snake_case = False
snake_case = False
def snake_case_ ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) -> str:
'''simple docstring'''
if pipeline_test_casse_name == "TextGenerationPipelineTests":
return True
return False
def snake_case_ ( self ) -> Union[str, Any]:
'''simple docstring'''
A_ = TFRoFormerModelTester(self )
A_ = ConfigTester(self , config_class=A__ , hidden_size=37 )
def snake_case_ ( self ) -> Optional[Any]:
'''simple docstring'''
self.config_tester.run_common_tests()
def snake_case_ ( self ) -> List[Any]:
'''simple docstring'''
A_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*A__ )
def snake_case_ ( self ) -> str:
'''simple docstring'''
A_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*A__ )
def snake_case_ ( self ) -> int:
'''simple docstring'''
A_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_lm_head(*A__ )
def snake_case_ ( self ) -> Dict:
'''simple docstring'''
A_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*A__ )
def snake_case_ ( self ) -> Union[str, Any]:
'''simple docstring'''
A_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*A__ )
def snake_case_ ( self ) -> Tuple:
'''simple docstring'''
A_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*A__ )
def snake_case_ ( self ) -> List[Any]:
'''simple docstring'''
A_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*A__ )
@slow
def snake_case_ ( self ) -> str:
'''simple docstring'''
A_ = TFRoFormerModel.from_pretrained("""junnyu/roformer_chinese_base""" )
self.assertIsNotNone(A__ )
@require_tf
class A__ ( unittest.TestCase ):
@slow
def snake_case_ ( self ) -> Optional[Any]:
'''simple docstring'''
A_ = TFRoFormerForMaskedLM.from_pretrained("""junnyu/roformer_chinese_base""" )
A_ = tf.constant([[0, 1, 2, 3, 4, 5]] )
A_ = model(A__ )[0]
# TODO Replace vocab size
A_ = 50000
A_ = [1, 6, vocab_size]
self.assertEqual(output.shape , A__ )
print(output[:, :3, :3] )
# TODO Replace values below with what was printed above.
A_ = tf.constant(
[
[
[-0.12053341, -1.0264901, 0.29221946],
[-1.5133783, 0.197433, 0.15190607],
[-5.0135403, -3.900256, -0.84038764],
]
] )
tf.debugging.assert_near(output[:, :3, :3] , A__ , atol=1e-4 )
@require_tf
class A__ ( unittest.TestCase ):
snake_case = 1e-4
def snake_case_ ( self ) -> List[Any]:
'''simple docstring'''
A_ = tf.constant([[4, 10]] )
A_ = TFRoFormerSinusoidalPositionalEmbedding(num_positions=6 , embedding_dim=6 )
A_ = emba(input_ids.shape )
A_ = tf.constant(
[[0.0000, 0.0000, 0.0000, 1.0000, 1.0000, 1.0000], [0.8415, 0.0464, 0.0022, 0.5403, 0.9989, 1.0000]] )
tf.debugging.assert_near(A__ , A__ , atol=self.tolerance )
def snake_case_ ( self ) -> List[Any]:
'''simple docstring'''
A_ = tf.constant(
[
[0.0000, 0.0000, 0.0000, 0.0000, 0.0000],
[0.8415, 0.8219, 0.8020, 0.7819, 0.7617],
[0.9093, 0.9364, 0.9581, 0.9749, 0.9870],
] )
A_ = TFRoFormerSinusoidalPositionalEmbedding(num_positions=512 , embedding_dim=512 )
emba([2, 16, 512] )
A_ = emba.weight[:3, :5]
tf.debugging.assert_near(A__ , A__ , atol=self.tolerance )
@require_tf
class A__ ( unittest.TestCase ):
snake_case = 1e-4
def snake_case_ ( self ) -> int:
'''simple docstring'''
        # shapes: (2, 12, 16, 64) = (batch, heads, seq_len, head_dim)
        A_ = tf.reshape(tf.range(2 * 12 * 16 * 64 , dtype=tf.float32 ) , shape=(2, 12, 16, 64) ) / 100
        A_ = -tf.reshape(tf.range(2 * 12 * 16 * 64 , dtype=tf.float32 ) , shape=(2, 12, 16, 64) ) / 100
A_ = TFRoFormerSinusoidalPositionalEmbedding(num_positions=32 , embedding_dim=64 )
A_ = embed_positions([2, 16, 768] )[None, None, :, :]
A_ , A_ = TFRoFormerSelfAttention.apply_rotary_position_embeddings(
A__ , A__ , A__ )
A_ = tf.constant(
[
[0.0000, 0.0100, 0.0200, 0.0300, 0.0400, 0.0500, 0.0600, 0.0700],
[-0.2012, 0.8897, 0.0263, 0.9401, 0.2074, 0.9463, 0.3481, 0.9343],
[-1.7057, 0.6271, -1.2145, 1.3897, -0.6303, 1.7647, -0.1173, 1.8985],
[-2.1731, -1.6397, -2.7358, 0.2854, -2.1840, 1.7183, -1.3018, 2.4871],
[0.2717, -3.6173, -2.9206, -2.1988, -3.6638, 0.3858, -2.9155, 2.2980],
[3.9859, -2.1580, -0.7984, -4.4904, -4.1181, -2.0252, -4.4782, 1.1253],
] )
A_ = tf.constant(
[
[0.0000, -0.0100, -0.0200, -0.0300, -0.0400, -0.0500, -0.0600, -0.0700],
[0.2012, -0.8897, -0.0263, -0.9401, -0.2074, -0.9463, -0.3481, -0.9343],
[1.7057, -0.6271, 1.2145, -1.3897, 0.6303, -1.7647, 0.1173, -1.8985],
[2.1731, 1.6397, 2.7358, -0.2854, 2.1840, -1.7183, 1.3018, -2.4871],
[-0.2717, 3.6173, 2.9206, 2.1988, 3.6638, -0.3858, 2.9155, -2.2980],
[-3.9859, 2.1580, 0.7984, 4.4904, 4.1181, 2.0252, 4.4782, -1.1253],
] )
tf.debugging.assert_near(query_layer[0, 0, :6, :8] , A__ , atol=self.tolerance )
tf.debugging.assert_near(key_layer[0, 0, :6, :8] , A__ , atol=self.tolerance )
| 700 |
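The last test above exercises rotary position embeddings. A compact NumPy sketch of the rotation they implement: split each vector into even/odd pairs and rotate every pair by a position-dependent sinusoidal angle. The pairing and sin/cos layout here are illustrative, not byte-for-byte the TF implementation.

import numpy as np

def apply_rotary(x: np.ndarray, sin: np.ndarray, cos: np.ndarray) -> np.ndarray:
    # rotate_half: pairs (x1, x2) -> (-x2, x1), matching the sign pattern in the test tensors
    x1, x2 = x[..., 0::2], x[..., 1::2]
    rotated = np.stack([-x2, x1], axis=-1).reshape(x.shape)
    return x * cos + rotated * sin

seq_len, dim = 4, 8
inv_freq = 1.0 / (10000 ** (np.arange(0, dim, 2) / dim))
angles = np.arange(seq_len)[:, None] * inv_freq  # (seq_len, dim // 2)
sin = np.repeat(np.sin(angles), 2, axis=-1)      # duplicate each angle per pair
cos = np.repeat(np.cos(angles), 2, axis=-1)
q = np.random.randn(seq_len, dim)
print(apply_rotary(q, sin, cos).shape)           # (4, 8)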
'''simple docstring'''
import tensorflow as tf
from ...tf_utils import shape_list
class A__ ( tf.keras.layers.Layer ):
def __init__( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__=1 , UpperCamelCase__=False , **UpperCamelCase__ ) -> Union[str, Any]:
'''simple docstring'''
super().__init__(**UpperCamelCase__ )
A_ = vocab_size
A_ = d_embed
A_ = d_proj
A_ = cutoffs + [vocab_size]
A_ = [0] + self.cutoffs
A_ = div_val
A_ = self.cutoffs[0]
A_ = len(self.cutoffs ) - 1
A_ = self.shortlist_size + self.n_clusters
A_ = keep_order
A_ = []
A_ = []
def snake_case_ ( self , UpperCamelCase__ ) -> Union[str, Any]:
'''simple docstring'''
if self.n_clusters > 0:
A_ = self.add_weight(
shape=(self.n_clusters, self.d_embed) , initializer="""zeros""" , trainable=UpperCamelCase__ , name="""cluster_weight""" )
A_ = self.add_weight(
shape=(self.n_clusters,) , initializer="""zeros""" , trainable=UpperCamelCase__ , name="""cluster_bias""" )
if self.div_val == 1:
for i in range(len(self.cutoffs ) ):
if self.d_proj != self.d_embed:
A_ = self.add_weight(
shape=(self.d_embed, self.d_proj) , initializer="""zeros""" , trainable=UpperCamelCase__ , name=f'''out_projs_._{i}''' , )
self.out_projs.append(UpperCamelCase__ )
else:
self.out_projs.append(UpperCamelCase__ )
A_ = self.add_weight(
shape=(self.vocab_size, self.d_embed) , initializer="""zeros""" , trainable=UpperCamelCase__ , name=f'''out_layers_._{i}_._weight''' , )
A_ = self.add_weight(
shape=(self.vocab_size,) , initializer="""zeros""" , trainable=UpperCamelCase__ , name=f'''out_layers_._{i}_._bias''' , )
self.out_layers.append((weight, bias) )
else:
for i in range(len(self.cutoffs ) ):
A_ , A_ = self.cutoff_ends[i], self.cutoff_ends[i + 1]
A_ = self.d_embed // (self.div_val**i)
A_ = self.add_weight(
shape=(d_emb_i, self.d_proj) , initializer="""zeros""" , trainable=UpperCamelCase__ , name=f'''out_projs_._{i}''' )
self.out_projs.append(UpperCamelCase__ )
A_ = self.add_weight(
shape=(r_idx - l_idx, d_emb_i) , initializer="""zeros""" , trainable=UpperCamelCase__ , name=f'''out_layers_._{i}_._weight''' , )
A_ = self.add_weight(
shape=(r_idx - l_idx,) , initializer="""zeros""" , trainable=UpperCamelCase__ , name=f'''out_layers_._{i}_._bias''' , )
self.out_layers.append((weight, bias) )
super().build(UpperCamelCase__ )
@staticmethod
def snake_case_ ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__=None ) -> List[Any]:
'''simple docstring'''
A_ = x
if proj is not None:
A_ = tf.einsum("""ibd,ed->ibe""" , UpperCamelCase__ , UpperCamelCase__ )
return tf.einsum("""ibd,nd->ibn""" , UpperCamelCase__ , UpperCamelCase__ ) + b
@staticmethod
def snake_case_ ( UpperCamelCase__ , UpperCamelCase__ ) -> List[str]:
'''simple docstring'''
A_ = shape_list(UpperCamelCase__ )
A_ = tf.range(lp_size[0] , dtype=target.dtype )
A_ = tf.stack([r, target] , 1 )
return tf.gather_nd(UpperCamelCase__ , UpperCamelCase__ )
def snake_case_ ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__=True , UpperCamelCase__=False ) -> Optional[int]:
'''simple docstring'''
A_ = 0
if self.n_clusters == 0:
A_ = self._logit(UpperCamelCase__ , self.out_layers[0][0] , self.out_layers[0][1] , self.out_projs[0] )
if target is not None:
A_ = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=UpperCamelCase__ , logits=UpperCamelCase__ )
A_ = tf.nn.log_softmax(UpperCamelCase__ , axis=-1 )
else:
A_ = shape_list(UpperCamelCase__ )
A_ = []
A_ = tf.zeros(hidden_sizes[:2] )
for i in range(len(self.cutoffs ) ):
A_ , A_ = self.cutoff_ends[i], self.cutoff_ends[i + 1]
if target is not None:
A_ = (target >= l_idx) & (target < r_idx)
A_ = tf.where(UpperCamelCase__ )
A_ = tf.boolean_mask(UpperCamelCase__ , UpperCamelCase__ ) - l_idx
if self.div_val == 1:
A_ = self.out_layers[0][0][l_idx:r_idx]
A_ = self.out_layers[0][1][l_idx:r_idx]
else:
A_ = self.out_layers[i][0]
A_ = self.out_layers[i][1]
if i == 0:
A_ = tf.concat([cur_W, self.cluster_weight] , 0 )
A_ = tf.concat([cur_b, self.cluster_bias] , 0 )
A_ = self._logit(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , self.out_projs[0] )
A_ = tf.nn.log_softmax(UpperCamelCase__ )
out.append(head_logprob[..., : self.cutoffs[0]] )
if target is not None:
A_ = tf.boolean_mask(UpperCamelCase__ , UpperCamelCase__ )
A_ = self._gather_logprob(UpperCamelCase__ , UpperCamelCase__ )
else:
A_ = self._logit(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , self.out_projs[i] )
A_ = tf.nn.log_softmax(UpperCamelCase__ )
A_ = self.cutoffs[0] + i - 1 # No probability for the head cluster
A_ = head_logprob[..., cluster_prob_idx, None] + tail_logprob
out.append(UpperCamelCase__ )
if target is not None:
A_ = tf.boolean_mask(UpperCamelCase__ , UpperCamelCase__ )
A_ = tf.boolean_mask(UpperCamelCase__ , UpperCamelCase__ )
A_ = self._gather_logprob(UpperCamelCase__ , UpperCamelCase__ )
cur_logprob += cur_head_logprob[:, self.cutoff_ends[1] + i - 1]
if target is not None:
loss += tf.scatter_nd(UpperCamelCase__ , -cur_logprob , shape_list(UpperCamelCase__ ) )
A_ = tf.concat(UpperCamelCase__ , axis=-1 )
if target is not None:
if return_mean:
A_ = tf.reduce_mean(UpperCamelCase__ )
# Add the training-time loss value to the layer using `self.add_loss()`.
self.add_loss(UpperCamelCase__ )
        # Log the loss as a metric (we could log arbitrary metrics,
        # including different metrics for training and inference).
self.add_metric(UpperCamelCase__ , name=self.name , aggregation="""mean""" if return_mean else """""" )
return out
| 667 | 0 |
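The layer above is an adaptive softmax: frequent shortlist tokens get full logits, while rare tokens are scored as log P(cluster) + log P(token | cluster). A tiny NumPy illustration of that factorisation with one tail cluster (sizes illustrative):

import numpy as np

def log_softmax(x):
    x = x - x.max(axis=-1, keepdims=True)
    return x - np.log(np.exp(x).sum(axis=-1, keepdims=True))

head_logits = np.array([1.0, 0.5, -0.2, 0.1])  # 3 shortlist tokens + 1 cluster logit
tail_logits = np.array([0.3, -0.1, 0.7])       # tokens inside the tail cluster

head_logprob = log_softmax(head_logits)
tail_logprob = log_softmax(tail_logits)

# log P(rare token) = log P(cluster) + log P(token | cluster)
full_logprob = np.concatenate([head_logprob[:3], head_logprob[3] + tail_logprob])
print(np.exp(full_logprob).sum())  # ~1.0: still a proper distribution over all 6 tokens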
'''simple docstring'''
import pytest
import datasets.config
from datasets.utils.info_utils import is_small_dataset
@pytest.mark.parametrize("""dataset_size""", [None, 4_00 * 2**20, 6_00 * 2**20] )
@pytest.mark.parametrize("""input_in_memory_max_size""", ["""default""", 0, 1_00 * 2**20, 9_00 * 2**20] )
def UpperCAmelCase__ ( UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__ ) -> List[str]:
if input_in_memory_max_size != "default":
monkeypatch.setattr(datasets.config, """IN_MEMORY_MAX_SIZE""", __A )
A_ = datasets.config.IN_MEMORY_MAX_SIZE
if input_in_memory_max_size == "default":
assert in_memory_max_size == 0
else:
assert in_memory_max_size == input_in_memory_max_size
if dataset_size and in_memory_max_size:
A_ = dataset_size < in_memory_max_size
else:
A_ = False
A_ = is_small_dataset(__A )
assert result == expected
| 701 |
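The behaviour pinned down above: a dataset counts as small only when both sizes are known and the dataset fits under the in-memory limit (a limit of 0 disables in-memory loading). A sketch of the predicate with the config value made an explicit argument - an assumed restatement, not the library's signature:

def is_small_dataset(dataset_size, in_memory_max_size):
    return bool(dataset_size and in_memory_max_size and dataset_size < in_memory_max_size)

print(is_small_dataset(400 * 2**20, 600 * 2**20))  # True: 400 MiB < 600 MiB
print(is_small_dataset(600 * 2**20, 0))            # False: limit disabled
print(is_small_dataset(None, 600 * 2**20))         # False: unknown size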
'''simple docstring'''
from queue import PriorityQueue
from typing import Any
import numpy as np
def UpperCAmelCase__ ( UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, ) -> float | int:
for nxt, d in graph[v]:
if nxt in visited_forward:
continue
A_ = cst_fwd.get(UpperCAmelCase__, np.inf )
A_ = cst_fwd[v] + d
if new_cost_f < old_cost_f:
queue.put((new_cost_f, nxt) )
A_ = new_cost_f
A_ = v
if nxt in visited_backward:
if cst_fwd[v] + d + cst_bwd[nxt] < shortest_distance:
A_ = cst_fwd[v] + d + cst_bwd[nxt]
return shortest_distance
def UpperCAmelCase__ ( UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__ ) -> int:
A_ = -1
A_ = set()
A_ = set()
A_ = {source: 0}
A_ = {destination: 0}
A_ = {source: None}
A_ = {destination: None}
A_ = PriorityQueue()
A_ = PriorityQueue()
A_ = np.inf
queue_forward.put((0, source) )
queue_backward.put((0, destination) )
if source == destination:
return 0
while not queue_forward.empty() and not queue_backward.empty():
A_ , A_ = queue_forward.get()
visited_forward.add(UpperCAmelCase__ )
A_ , A_ = queue_backward.get()
visited_backward.add(UpperCAmelCase__ )
A_ = pass_and_relaxation(
UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, )
A_ = pass_and_relaxation(
UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, )
if cst_fwd[v_fwd] + cst_bwd[v_bwd] >= shortest_distance:
break
if shortest_distance != np.inf:
A_ = shortest_distance
return shortest_path_distance
__lowerCamelCase = {
'''B''': [['''C''', 1]],
'''C''': [['''D''', 1]],
'''D''': [['''F''', 1]],
'''E''': [['''B''', 1], ['''G''', 2]],
'''F''': [],
'''G''': [['''F''', 1]],
}
__lowerCamelCase = {
'''B''': [['''E''', 1]],
'''C''': [['''B''', 1]],
'''D''': [['''C''', 1]],
'''F''': [['''D''', 1], ['''G''', 1]],
'''E''': [[None, np.inf]],
'''G''': [['''E''', 2]],
}
if __name__ == "__main__":
import doctest
doctest.testmod()
| 667 | 0 |
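A worked check on the graphs above: from "E" the forward edges reach "F" either via B -> C -> D (cost 4) or via G (cost 2 + 1 = 3), so the meet-in-the-middle search should settle on 3. A one-directional Dijkstra as a reference oracle, with an illustrative name since defs in this file share one identifier:

import heapq

forward_graph = {
    "B": [["C", 1]], "C": [["D", 1]], "D": [["F", 1]],
    "E": [["B", 1], ["G", 2]], "F": [], "G": [["F", 1]],
}

def plain_dijkstra(graph, source, destination):
    dist, heap = {source: 0}, [(0, source)]
    while heap:
        d, v = heapq.heappop(heap)
        if v == destination:
            return d
        for nxt, w in graph.get(v, []):
            if d + w < dist.get(nxt, float("inf")):
                dist[nxt] = d + w
                heapq.heappush(heap, (d + w, nxt))
    return -1

print(plain_dijkstra(forward_graph, "E", "F"))  # 3, via E -> G -> F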
'''simple docstring'''
import argparse
import os
import pickle
import sys
import torch
from transformers import TransfoXLConfig, TransfoXLLMHeadModel, load_tf_weights_in_transfo_xl
from transformers.models.transfo_xl import tokenization_transfo_xl as data_utils
from transformers.models.transfo_xl.tokenization_transfo_xl import CORPUS_NAME, VOCAB_FILES_NAMES
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
# We do this to be able to load python 2 datasets pickles
# See e.g. https://stackoverflow.com/questions/2121874/python-pickling-after-changing-a-modules-directory/2121918#2121918
__lowerCamelCase = data_utils.TransfoXLTokenizer
__lowerCamelCase = data_utils.TransfoXLCorpus
__lowerCamelCase = data_utils
__lowerCamelCase = data_utils
def UpperCAmelCase__ ( UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__ ) -> Dict:
if transfo_xl_dataset_file:
# Convert a pre-processed corpus (see original TensorFlow repo)
with open(_SCREAMING_SNAKE_CASE, """rb""" ) as fp:
A_ = pickle.load(_SCREAMING_SNAKE_CASE, encoding="""latin1""" )
# Save vocabulary and dataset cache as Dictionaries (should be better than pickles for the long-term)
A_ = pytorch_dump_folder_path + """/""" + VOCAB_FILES_NAMES["""pretrained_vocab_file"""]
print(F'''Save vocabulary to {pytorch_vocab_dump_path}''' )
A_ = corpus.vocab.__dict__
torch.save(_SCREAMING_SNAKE_CASE, _SCREAMING_SNAKE_CASE )
A_ = corpus.__dict__
corpus_dict_no_vocab.pop("""vocab""", _SCREAMING_SNAKE_CASE )
A_ = pytorch_dump_folder_path + """/""" + CORPUS_NAME
print(F'''Save dataset to {pytorch_dataset_dump_path}''' )
torch.save(_SCREAMING_SNAKE_CASE, _SCREAMING_SNAKE_CASE )
if tf_checkpoint_path:
# Convert a pre-trained TensorFlow model
A_ = os.path.abspath(_SCREAMING_SNAKE_CASE )
A_ = os.path.abspath(_SCREAMING_SNAKE_CASE )
print(F'''Converting Transformer XL checkpoint from {tf_path} with config at {config_path}.''' )
# Initialise PyTorch model
if transfo_xl_config_file == "":
A_ = TransfoXLConfig()
else:
A_ = TransfoXLConfig.from_json_file(_SCREAMING_SNAKE_CASE )
print(F'''Building PyTorch model from configuration: {config}''' )
A_ = TransfoXLLMHeadModel(_SCREAMING_SNAKE_CASE )
A_ = load_tf_weights_in_transfo_xl(_SCREAMING_SNAKE_CASE, _SCREAMING_SNAKE_CASE, _SCREAMING_SNAKE_CASE )
# Save pytorch-model
A_ = os.path.join(_SCREAMING_SNAKE_CASE, _SCREAMING_SNAKE_CASE )
A_ = os.path.join(_SCREAMING_SNAKE_CASE, _SCREAMING_SNAKE_CASE )
print(F'''Save PyTorch model to {os.path.abspath(_SCREAMING_SNAKE_CASE )}''' )
torch.save(model.state_dict(), _SCREAMING_SNAKE_CASE )
print(F'''Save configuration file to {os.path.abspath(_SCREAMING_SNAKE_CASE )}''' )
with open(_SCREAMING_SNAKE_CASE, """w""", encoding="""utf-8""" ) as f:
f.write(config.to_json_string() )
if __name__ == "__main__":
__lowerCamelCase = argparse.ArgumentParser()
parser.add_argument(
'''--pytorch_dump_folder_path''',
default=None,
type=str,
required=True,
help='''Path to the folder to store the PyTorch model or dataset/vocab.''',
)
parser.add_argument(
'''--tf_checkpoint_path''',
default='''''',
type=str,
help='''An optional path to a TensorFlow checkpoint path to be converted.''',
)
parser.add_argument(
'''--transfo_xl_config_file''',
default='''''',
type=str,
help=(
'''An optional config json file corresponding to the pre-trained BERT model. \n'''
'''This specifies the model architecture.'''
),
)
parser.add_argument(
'''--transfo_xl_dataset_file''',
default='''''',
type=str,
help='''An optional dataset file to be converted in a vocabulary.''',
)
__lowerCamelCase = parser.parse_args()
convert_transfo_xl_checkpoint_to_pytorch(
args.tf_checkpoint_path,
args.transfo_xl_config_file,
args.pytorch_dump_folder_path,
args.transfo_xl_dataset_file,
)
| 702 |
'''simple docstring'''
import os
__lowerCamelCase = {'''I''': 1, '''V''': 5, '''X''': 10, '''L''': 50, '''C''': 100, '''D''': 500, '''M''': 1000}
def UpperCAmelCase__ ( UpperCAmelCase__ ) -> int:
A_ = 0
A_ = 0
while index < len(UpperCAmelCase__ ) - 1:
A_ = SYMBOLS[numerals[index]]
A_ = SYMBOLS[numerals[index + 1]]
if current_value < next_value:
total_value -= current_value
else:
total_value += current_value
index += 1
total_value += SYMBOLS[numerals[index]]
return total_value
def UpperCAmelCase__ ( UpperCAmelCase__ ) -> str:
A_ = """"""
A_ = num // 10_00
numerals += m_count * "M"
num %= 10_00
A_ = num // 1_00
if c_count == 9:
numerals += "CM"
c_count -= 9
elif c_count == 4:
numerals += "CD"
c_count -= 4
if c_count >= 5:
numerals += "D"
c_count -= 5
numerals += c_count * "C"
num %= 1_00
A_ = num // 10
if x_count == 9:
numerals += "XC"
x_count -= 9
elif x_count == 4:
numerals += "XL"
x_count -= 4
if x_count >= 5:
numerals += "L"
x_count -= 5
numerals += x_count * "X"
num %= 10
if num == 9:
numerals += "IX"
num -= 9
elif num == 4:
numerals += "IV"
num -= 4
if num >= 5:
numerals += "V"
num -= 5
numerals += num * "I"
return numerals
def UpperCAmelCase__ ( UpperCAmelCase__ = "/p089_roman.txt" ) -> int:
A_ = 0
with open(os.path.dirname(UpperCAmelCase__ ) + roman_numerals_filename ) as filea:
A_ = filea.readlines()
for line in lines:
A_ = line.strip()
A_ = parse_roman_numerals(UpperCAmelCase__ )
A_ = generate_roman_numerals(UpperCAmelCase__ )
savings += len(UpperCAmelCase__ ) - len(UpperCAmelCase__ )
return savings
if __name__ == "__main__":
print(f"""{solution() = }""")
| 667 | 0 |
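A round-trip illustration of the parsing rule above (subtract a symbol when a larger one follows), restated with descriptive names since the defs share one identifier: "MCCCCCCVI" is a valid but wasteful spelling of 1606, and its minimal form "MDCVI" saves four characters.

SYMBOLS = {"I": 1, "V": 5, "X": 10, "L": 50, "C": 100, "D": 500, "M": 1000}

def parse_roman(numerals: str) -> int:
    total = 0
    # pad with "I" so the final symbol is always added
    for cur, nxt in zip(numerals, numerals[1:] + "I"):
        total += SYMBOLS[cur] if SYMBOLS[cur] >= SYMBOLS[nxt] else -SYMBOLS[cur]
    return total

print(parse_roman("MCCCCCCVI"), parse_roman("MDCVI"))  # 1606 1606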
import os
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from huggingface_hub.file_download import http_get
from requests.exceptions import HTTPError
from transformers import (
AlbertTokenizer,
AutoTokenizer,
BertTokenizer,
BertTokenizerFast,
    GPT2TokenizerFast,
is_tokenizers_available,
)
from transformers.testing_utils import TOKEN, USER, is_staging_test, require_tokenizers
from transformers.tokenization_utils import Trie
sys.path.append(str(Path(__file__).parent.parent / '''utils'''))
from test_module.custom_tokenization import CustomTokenizer # noqa E402
if is_tokenizers_available():
from test_module.custom_tokenization_fast import CustomTokenizerFast
class A__ ( unittest.TestCase ):
def snake_case_ ( self ) -> Tuple:
'''simple docstring'''
A_ = mock.Mock()
A_ = 500
A_ = {}
A_ = HTTPError
A_ = {}
# Download this model to make sure it's in the cache.
A_ = BertTokenizer.from_pretrained("""hf-internal-testing/tiny-random-bert""" )
# Under the mock environment we get a 500 error when trying to reach the tokenizer.
with mock.patch("""requests.Session.request""" , return_value=A_ ) as mock_head:
A_ = BertTokenizer.from_pretrained("""hf-internal-testing/tiny-random-bert""" )
            # This checks that the mocked head request was actually called
mock_head.assert_called()
@require_tokenizers
def snake_case_ ( self ) -> Dict:
'''simple docstring'''
A_ = mock.Mock()
A_ = 500
A_ = {}
A_ = HTTPError
A_ = {}
# Download this model to make sure it's in the cache.
        A_ = GPT2TokenizerFast.from_pretrained("""gpt2""" )
# Under the mock environment we get a 500 error when trying to reach the tokenizer.
with mock.patch("""requests.Session.request""" , return_value=A_ ) as mock_head:
            A_ = GPT2TokenizerFast.from_pretrained("""gpt2""" )
            # This checks that the mocked head request was actually called
mock_head.assert_called()
def snake_case_ ( self ) -> Optional[Any]:
'''simple docstring'''
# This test is for deprecated behavior and can be removed in v5
try:
A_ = tempfile.mktemp()
with open(A_ , """wb""" ) as f:
http_get("""https://huggingface.co/albert-base-v1/resolve/main/spiece.model""" , A_ )
A_ = AlbertTokenizer.from_pretrained(A_ )
finally:
os.remove(A_ )
# Supporting this legacy load introduced a weird bug where the tokenizer would load local files if they are in
# the current folder and have the right name.
if os.path.isfile("""tokenizer.json""" ):
# We skip the test if the user has a `tokenizer.json` in this folder to avoid deleting it.
return
try:
with open("""tokenizer.json""" , """wb""" ) as f:
http_get("""https://huggingface.co/hf-internal-testing/tiny-random-bert/blob/main/tokenizer.json""" , A_ )
A_ = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" )
# The tiny random BERT has a vocab size of 1024, tiny gpt2 as a vocab size of 1000
self.assertEqual(tokenizer.vocab_size , 1000 )
# Tokenizer should depend on the remote checkpoint, not the local tokenizer.json file.
finally:
os.remove("""tokenizer.json""" )
def snake_case_ ( self ) -> Dict:
'''simple docstring'''
A_ = AlbertTokenizer.from_pretrained("""https://huggingface.co/albert-base-v1/resolve/main/spiece.model""" )
@is_staging_test
class A__ ( unittest.TestCase ):
lowercase = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "bla", "blou"]
@classmethod
def snake_case_ ( cls ) -> Tuple:
'''simple docstring'''
A_ = TOKEN
HfFolder.save_token(A_ )
@classmethod
def snake_case_ ( cls ) -> Optional[int]:
'''simple docstring'''
try:
delete_repo(token=cls._token , repo_id="""test-tokenizer""" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="""valid_org/test-tokenizer-org""" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="""test-dynamic-tokenizer""" )
except HTTPError:
pass
def snake_case_ ( self ) -> Any:
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmp_dir:
A_ = os.path.join(A_ , """vocab.txt""" )
with open(A_ , """w""" , encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in self.vocab_tokens] ) )
A_ = BertTokenizer(A_ )
tokenizer.push_to_hub("""test-tokenizer""" , use_auth_token=self._token )
A_ = BertTokenizer.from_pretrained(f'''{USER}/test-tokenizer''' )
self.assertDictEqual(new_tokenizer.vocab , tokenizer.vocab )
# Reset repo
delete_repo(token=self._token , repo_id="""test-tokenizer""" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(A_ , repo_id="""test-tokenizer""" , push_to_hub=A_ , use_auth_token=self._token )
A_ = BertTokenizer.from_pretrained(f'''{USER}/test-tokenizer''' )
self.assertDictEqual(new_tokenizer.vocab , tokenizer.vocab )
def snake_case_ ( self ) -> int:
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmp_dir:
A_ = os.path.join(A_ , """vocab.txt""" )
with open(A_ , """w""" , encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in self.vocab_tokens] ) )
A_ = BertTokenizer(A_ )
tokenizer.push_to_hub("""valid_org/test-tokenizer-org""" , use_auth_token=self._token )
A_ = BertTokenizer.from_pretrained("""valid_org/test-tokenizer-org""" )
self.assertDictEqual(new_tokenizer.vocab , tokenizer.vocab )
# Reset repo
delete_repo(token=self._token , repo_id="""valid_org/test-tokenizer-org""" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(
A_ , repo_id="""valid_org/test-tokenizer-org""" , push_to_hub=A_ , use_auth_token=self._token )
A_ = BertTokenizer.from_pretrained("""valid_org/test-tokenizer-org""" )
self.assertDictEqual(new_tokenizer.vocab , tokenizer.vocab )
@require_tokenizers
def snake_case_ ( self ) -> Dict:
'''simple docstring'''
CustomTokenizer.register_for_auto_class()
with tempfile.TemporaryDirectory() as tmp_dir:
A_ = os.path.join(A_ , """vocab.txt""" )
with open(A_ , """w""" , encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in self.vocab_tokens] ) )
A_ = CustomTokenizer(A_ )
# No fast custom tokenizer
tokenizer.push_to_hub("""test-dynamic-tokenizer""" , use_auth_token=self._token )
A_ = AutoTokenizer.from_pretrained(f'''{USER}/test-dynamic-tokenizer''' , trust_remote_code=A_ )
# Can't make an isinstance check because the new_model.config is from the CustomTokenizer class of a dynamic module
self.assertEqual(tokenizer.__class__.__name__ , """CustomTokenizer""" )
# Fast and slow custom tokenizer
CustomTokenizerFast.register_for_auto_class()
with tempfile.TemporaryDirectory() as tmp_dir:
A_ = os.path.join(A_ , """vocab.txt""" )
with open(A_ , """w""" , encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in self.vocab_tokens] ) )
A_ = BertTokenizerFast.from_pretrained(A_ )
bert_tokenizer.save_pretrained(A_ )
A_ = CustomTokenizerFast.from_pretrained(A_ )
tokenizer.push_to_hub("""test-dynamic-tokenizer""" , use_auth_token=self._token )
A_ = AutoTokenizer.from_pretrained(f'''{USER}/test-dynamic-tokenizer''' , trust_remote_code=A_ )
# Can't make an isinstance check because the new_model.config is from the FakeConfig class of a dynamic module
self.assertEqual(tokenizer.__class__.__name__ , """CustomTokenizerFast""" )
A_ = AutoTokenizer.from_pretrained(
f'''{USER}/test-dynamic-tokenizer''' , use_fast=A_ , trust_remote_code=A_ )
# Can't make an isinstance check because the new_model.config is from the FakeConfig class of a dynamic module
self.assertEqual(tokenizer.__class__.__name__ , """CustomTokenizer""" )
class A__ ( unittest.TestCase ):
def snake_case_ ( self ) -> Optional[int]:
'''simple docstring'''
A_ = Trie()
trie.add("""Hello 友達""" )
self.assertEqual(trie.data , {"""H""": {"""e""": {"""l""": {"""l""": {"""o""": {""" """: {"""友""": {"""達""": {"""""": 1}}}}}}}}} )
trie.add("""Hello""" )
trie.data
self.assertEqual(trie.data , {"""H""": {"""e""": {"""l""": {"""l""": {"""o""": {"""""": 1, """ """: {"""友""": {"""達""": {"""""": 1}}}}}}}}} )
def snake_case_ ( self ) -> str:
'''simple docstring'''
A_ = Trie()
self.assertEqual(trie.split("""[CLS] This is a extra_id_100""" ) , ["""[CLS] This is a extra_id_100"""] )
trie.add("""[CLS]""" )
trie.add("""extra_id_1""" )
trie.add("""extra_id_100""" )
self.assertEqual(trie.split("""[CLS] This is a extra_id_100""" ) , ["""[CLS]""", """ This is a """, """extra_id_100"""] )
def snake_case_ ( self ) -> List[Any]:
'''simple docstring'''
A_ = Trie()
trie.add("""A""" )
self.assertEqual(trie.split("""ABC""" ) , ["""A""", """BC"""] )
self.assertEqual(trie.split("""BCA""" ) , ["""BC""", """A"""] )
def snake_case_ ( self ) -> Tuple:
'''simple docstring'''
A_ = Trie()
trie.add("""TOKEN]""" )
trie.add("""[SPECIAL_TOKEN]""" )
self.assertEqual(trie.split("""This is something [SPECIAL_TOKEN]""" ) , ["""This is something """, """[SPECIAL_TOKEN]"""] )
def snake_case_ ( self ) -> Dict:
'''simple docstring'''
A_ = Trie()
trie.add("""A""" )
trie.add("""P""" )
trie.add("""[SPECIAL_TOKEN]""" )
self.assertEqual(trie.split("""This is something [SPECIAL_TOKEN]""" ) , ["""This is something """, """[SPECIAL_TOKEN]"""] )
def snake_case_ ( self ) -> Optional[Any]:
'''simple docstring'''
A_ = Trie()
trie.add("""AB""" )
trie.add("""B""" )
trie.add("""C""" )
self.assertEqual(trie.split("""ABC""" ) , ["""AB""", """C"""] )
def snake_case_ ( self ) -> Tuple:
'''simple docstring'''
A_ = Trie()
trie.add("""ABC""" )
trie.add("""B""" )
trie.add("""CD""" )
self.assertEqual(trie.split("""ABCD""" ) , ["""ABC""", """D"""] )
def snake_case_ ( self ) -> int:
'''simple docstring'''
A_ = Trie()
A_ = trie.cut_text("""ABC""" , [0, 0, 2, 1, 2, 3] )
self.assertEqual(A_ , ["""AB""", """C"""] )
| 703 |
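The tests above pin down the Trie's contract: tokens are stored as nested dicts with an empty-string key marking a terminal, and split() cuts at the longest added token. A minimal sketch of the add-side data layout, independent of the real tokenizer Trie:

def trie_add(data: dict, word: str) -> None:
    node = data
    for ch in word:
        node = node.setdefault(ch, {})
    node[""] = 1  # terminal marker, matching the {"": 1} leaves asserted above

trie = {}
trie_add(trie, "Hello")
trie_add(trie, "Hello 友達")
print(list(trie["H"]["e"]["l"]["l"]["o"]))  # ['', ' ']: both words share the prefix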
'''simple docstring'''
import warnings
from diffusers import StableDiffusionImgaImgPipeline # noqa F401
warnings.warn(
'''The `image_to_image.py` script is outdated. Please use directly `from diffusers import'''
''' StableDiffusionImg2ImgPipeline` instead.'''
)
| 667 | 0 |
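Per the warning, the supported path is the direct import. A minimal sketch (the checkpoint name is illustrative and the call downloads weights):

from diffusers import StableDiffusionImg2ImgPipeline

pipe = StableDiffusionImg2ImgPipeline.from_pretrained("runwayml/stable-diffusion-v1-5")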
'''simple docstring'''
import unittest
from knapsack import greedy_knapsack as kp
class A__ ( unittest.TestCase ):
def snake_case_ ( self ) -> Any:
'''simple docstring'''
A_ = [10, 20, 30, 40, 50, 60]
A_ = [2, 4, 6, 8, 10, 12]
A_ = 100
self.assertEqual(kp.calc_profit(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) , 210 )
def snake_case_ ( self ) -> int:
'''simple docstring'''
self.assertRaisesRegex(UpperCamelCase_ , """max_weight must greater than zero.""" )
def snake_case_ ( self ) -> Optional[Any]:
'''simple docstring'''
self.assertRaisesRegex(UpperCamelCase_ , """Weight can not be negative.""" )
def snake_case_ ( self ) -> Optional[Any]:
'''simple docstring'''
self.assertRaisesRegex(UpperCamelCase_ , """Profit can not be negative.""" )
def snake_case_ ( self ) -> List[str]:
'''simple docstring'''
self.assertRaisesRegex(UpperCamelCase_ , """max_weight must greater than zero.""" )
def snake_case_ ( self ) -> List[Any]:
'''simple docstring'''
self.assertRaisesRegex(
UpperCamelCase_ , """The length of profit and weight must be same.""" )
if __name__ == "__main__":
unittest.main()
| 704 |
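The tests above target a greedy fractional-knapsack calc_profit. A sketch of the algorithm they imply - sort by profit/weight ratio, take fractions once capacity runs low - offered as an assumption about the implementation, not a copy of it:

def calc_profit(profit, weight, max_weight):
    # best ratio first; in the test data every item has ratio 5, so order does not matter
    items = sorted(zip(profit, weight), key=lambda pw: pw[0] / pw[1], reverse=True)
    total, remaining = 0.0, max_weight
    for p, w in items:
        take = min(w, remaining)
        total += p * (take / w)
        remaining -= take
        if remaining == 0:
            break
    return total

print(calc_profit([10, 20, 30, 40, 50, 60], [2, 4, 6, 8, 10, 12], 100))  # 210.0, as asserted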
'''simple docstring'''
import importlib.util
import os
import platform
from argparse import ArgumentParser
import huggingface_hub
from .. import __version__ as version
from ..utils import (
is_accelerate_available,
is_flax_available,
is_safetensors_available,
is_tf_available,
is_torch_available,
)
from . import BaseTransformersCLICommand
def UpperCAmelCase__ ( UpperCAmelCase__ ) -> Tuple:
return EnvironmentCommand()
def UpperCAmelCase__ ( UpperCAmelCase__ ) -> int:
return EnvironmentCommand(args.accelerate_config_file )
class A__ ( _snake_case ):
@staticmethod
def snake_case_ ( UpperCamelCase__ ) -> Union[str, Any]:
'''simple docstring'''
A_ = parser.add_parser("""env""" )
download_parser.set_defaults(func=UpperCamelCase__ )
download_parser.add_argument(
"""--accelerate-config_file""" , default=UpperCamelCase__ , help="""The accelerate config file to use for the default values in the launching script.""" , )
download_parser.set_defaults(func=UpperCamelCase__ )
def __init__( self , UpperCamelCase__ , *UpperCamelCase__ ) -> None:
'''simple docstring'''
A_ = accelerate_config_file
def snake_case_ ( self ) -> List[str]:
'''simple docstring'''
A_ = """not installed"""
if is_safetensors_available():
import safetensors
A_ = safetensors.__version__
elif importlib.util.find_spec("""safetensors""" ) is not None:
import safetensors
A_ = f'''{safetensors.__version__} but is ignored because of PyTorch version too old.'''
A_ = """not installed"""
A_ = A_ = """not found"""
if is_accelerate_available():
import accelerate
from accelerate.commands.config import default_config_file, load_config_from_file
A_ = accelerate.__version__
# Get the default from the config file.
if self._accelerate_config_file is not None or os.path.isfile(UpperCamelCase__ ):
A_ = load_config_from_file(self._accelerate_config_file ).to_dict()
A_ = (
"""\n""".join([f'''\t- {prop}: {val}''' for prop, val in accelerate_config.items()] )
if isinstance(UpperCamelCase__ , UpperCamelCase__ )
else f'''\t{accelerate_config}'''
)
A_ = """not installed"""
A_ = """NA"""
if is_torch_available():
import torch
A_ = torch.__version__
A_ = torch.cuda.is_available()
A_ = """not installed"""
A_ = """NA"""
if is_tf_available():
import tensorflow as tf
A_ = tf.__version__
try:
# deprecated in v2.1
A_ = tf.test.is_gpu_available()
except AttributeError:
# returns list of devices, convert to bool
A_ = bool(tf.config.list_physical_devices("""GPU""" ) )
A_ = """not installed"""
A_ = """not installed"""
A_ = """not installed"""
A_ = """NA"""
if is_flax_available():
import flax
import jax
import jaxlib
A_ = flax.__version__
A_ = jax.__version__
A_ = jaxlib.__version__
A_ = jax.lib.xla_bridge.get_backend().platform
A_ = {
"""`transformers` version""": version,
"""Platform""": platform.platform(),
"""Python version""": platform.python_version(),
"""Huggingface_hub version""": huggingface_hub.__version__,
"""Safetensors version""": f'''{safetensors_version}''',
"""Accelerate version""": f'''{accelerate_version}''',
"""Accelerate config""": f'''{accelerate_config_str}''',
"""PyTorch version (GPU?)""": f'''{pt_version} ({pt_cuda_available})''',
"""Tensorflow version (GPU?)""": f'''{tf_version} ({tf_cuda_available})''',
"""Flax version (CPU?/GPU?/TPU?)""": f'''{flax_version} ({jax_backend})''',
"""Jax version""": f'''{jax_version}''',
"""JaxLib version""": f'''{jaxlib_version}''',
"""Using GPU in script?""": """<fill in>""",
"""Using distributed or parallel set-up in script?""": """<fill in>""",
}
print("""\nCopy-and-paste the text below in your GitHub issue and FILL OUT the two last points.\n""" )
print(self.format_dict(UpperCamelCase__ ) )
return info
@staticmethod
def snake_case_ ( UpperCamelCase__ ) -> List[str]:
'''simple docstring'''
return "\n".join([f'''- {prop}: {val}''' for prop, val in d.items()] ) + "\n"
| 667 | 0 |
'''simple docstring'''
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ConvNextConfig, SegformerImageProcessor, UperNetConfig, UperNetForSemanticSegmentation
def UpperCAmelCase__ ( UpperCAmelCase__ ) -> Union[str, Any]:
A_ = 3_84
if "tiny" in model_name:
A_ = [3, 3, 9, 3]
A_ = [96, 1_92, 3_84, 7_68]
if "small" in model_name:
A_ = [3, 3, 27, 3]
A_ = [96, 1_92, 3_84, 7_68]
if "base" in model_name:
A_ = [3, 3, 27, 3]
A_ = [1_28, 2_56, 5_12, 10_24]
A_ = 5_12
if "large" in model_name:
A_ = [3, 3, 27, 3]
A_ = [1_92, 3_84, 7_68, 15_36]
A_ = 7_68
if "xlarge" in model_name:
A_ = [3, 3, 27, 3]
A_ = [2_56, 5_12, 10_24, 20_48]
A_ = 10_24
# set label information
A_ = 1_50
A_ = "huggingface/label-files"
A_ = "ade20k-id2label.json"
A_ = json.load(open(hf_hub_download(_A, _A, repo_type="""dataset""" ), """r""" ) )
A_ = {int(_A ): v for k, v in idalabel.items()}
A_ = {v: k for k, v in idalabel.items()}
A_ = ConvNextConfig(
depths=_A, hidden_sizes=_A, out_features=["""stage1""", """stage2""", """stage3""", """stage4"""] )
A_ = UperNetConfig(
backbone_config=_A, auxiliary_in_channels=_A, num_labels=_A, idalabel=_A, labelaid=_A, )
return config
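
# Illustration: for model_name="upernet-convnext-tiny" the branches above select
# depths=[3, 3, 9, 3] and hidden_sizes=[96, 192, 384, 768] while keeping the
# default auxiliary channel width of 384; "xlarge" instead selects
# depths=[3, 3, 27, 3], hidden_sizes=[256, 512, 1024, 2048] and a width of 1024.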
def UpperCAmelCase__ ( UpperCAmelCase__ ) -> Optional[Any]:
A_ = []
# fmt: off
# stem
rename_keys.append(("""backbone.downsample_layers.0.0.weight""", """backbone.embeddings.patch_embeddings.weight""") )
rename_keys.append(("""backbone.downsample_layers.0.0.bias""", """backbone.embeddings.patch_embeddings.bias""") )
rename_keys.append(("""backbone.downsample_layers.0.1.weight""", """backbone.embeddings.layernorm.weight""") )
rename_keys.append(("""backbone.downsample_layers.0.1.bias""", """backbone.embeddings.layernorm.bias""") )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((F'''backbone.stages.{i}.{j}.gamma''', F'''backbone.encoder.stages.{i}.layers.{j}.layer_scale_parameter''') )
rename_keys.append((F'''backbone.stages.{i}.{j}.depthwise_conv.weight''', F'''backbone.encoder.stages.{i}.layers.{j}.dwconv.weight''') )
rename_keys.append((F'''backbone.stages.{i}.{j}.depthwise_conv.bias''', F'''backbone.encoder.stages.{i}.layers.{j}.dwconv.bias''') )
rename_keys.append((F'''backbone.stages.{i}.{j}.norm.weight''', F'''backbone.encoder.stages.{i}.layers.{j}.layernorm.weight''') )
rename_keys.append((F'''backbone.stages.{i}.{j}.norm.bias''', F'''backbone.encoder.stages.{i}.layers.{j}.layernorm.bias''') )
rename_keys.append((F'''backbone.stages.{i}.{j}.pointwise_conv1.weight''', F'''backbone.encoder.stages.{i}.layers.{j}.pwconv1.weight''') )
rename_keys.append((F'''backbone.stages.{i}.{j}.pointwise_conv1.bias''', F'''backbone.encoder.stages.{i}.layers.{j}.pwconv1.bias''') )
rename_keys.append((F'''backbone.stages.{i}.{j}.pointwise_conv2.weight''', F'''backbone.encoder.stages.{i}.layers.{j}.pwconv2.weight''') )
rename_keys.append((F'''backbone.stages.{i}.{j}.pointwise_conv2.bias''', F'''backbone.encoder.stages.{i}.layers.{j}.pwconv2.bias''') )
if i > 0:
rename_keys.append((F'''backbone.downsample_layers.{i}.0.weight''', F'''backbone.encoder.stages.{i}.downsampling_layer.0.weight''') )
rename_keys.append((F'''backbone.downsample_layers.{i}.0.bias''', F'''backbone.encoder.stages.{i}.downsampling_layer.0.bias''') )
rename_keys.append((F'''backbone.downsample_layers.{i}.1.weight''', F'''backbone.encoder.stages.{i}.downsampling_layer.1.weight''') )
rename_keys.append((F'''backbone.downsample_layers.{i}.1.bias''', F'''backbone.encoder.stages.{i}.downsampling_layer.1.bias''') )
rename_keys.append((F'''backbone.norm{i}.weight''', F'''backbone.hidden_states_norms.stage{i+1}.weight''') )
rename_keys.append((F'''backbone.norm{i}.bias''', F'''backbone.hidden_states_norms.stage{i+1}.bias''') )
# decode head
rename_keys.extend(
[
("""decode_head.conv_seg.weight""", """decode_head.classifier.weight"""),
("""decode_head.conv_seg.bias""", """decode_head.classifier.bias"""),
("""auxiliary_head.conv_seg.weight""", """auxiliary_head.classifier.weight"""),
("""auxiliary_head.conv_seg.bias""", """auxiliary_head.classifier.bias"""),
] )
# fmt: on
return rename_keys
def UpperCAmelCase__ ( UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__ ) -> Dict:
A_ = dct.pop(_A )
A_ = val
def UpperCAmelCase__ ( UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__ ) -> Any:
A_ = {
"upernet-convnext-tiny": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_tiny_fp16_512x512_160k_ade20k/upernet_convnext_tiny_fp16_512x512_160k_ade20k_20220227_124553-cad485de.pth",
"upernet-convnext-small": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_small_fp16_512x512_160k_ade20k/upernet_convnext_small_fp16_512x512_160k_ade20k_20220227_131208-1b1e394f.pth",
"upernet-convnext-base": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_base_fp16_512x512_160k_ade20k/upernet_convnext_base_fp16_512x512_160k_ade20k_20220227_181227-02a24fc6.pth",
"upernet-convnext-large": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_large_fp16_640x640_160k_ade20k/upernet_convnext_large_fp16_640x640_160k_ade20k_20220226_040532-e57aa54d.pth",
"upernet-convnext-xlarge": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_xlarge_fp16_640x640_160k_ade20k/upernet_convnext_xlarge_fp16_640x640_160k_ade20k_20220226_080344-95fc38c2.pth",
}
A_ = model_name_to_url[model_name]
A_ = torch.hub.load_state_dict_from_url(_A, map_location="""cpu""" )["state_dict"]
A_ = get_upernet_config(_A )
A_ = UperNetForSemanticSegmentation(_A )
model.eval()
# replace "bn" => "batch_norm"
for key in state_dict.copy().keys():
A_ = state_dict.pop(_A )
if "bn" in key:
A_ = key.replace("""bn""", """batch_norm""" )
A_ = val
# rename keys
A_ = create_rename_keys(_A )
for src, dest in rename_keys:
rename_key(_A, _A, _A )
model.load_state_dict(_A )
# verify on image
A_ = "https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg"
A_ = Image.open(requests.get(_A, stream=_A ).raw ).convert("""RGB""" )
A_ = SegformerImageProcessor()
A_ = processor(_A, return_tensors="""pt""" ).pixel_values
with torch.no_grad():
A_ = model(_A )
if model_name == "upernet-convnext-tiny":
A_ = torch.tensor(
[[-8.8_110, -8.8_110, -8.6_521], [-8.8_110, -8.8_110, -8.6_521], [-8.7_746, -8.7_746, -8.6_130]] )
elif model_name == "upernet-convnext-small":
A_ = torch.tensor(
[[-8.8_236, -8.8_236, -8.6_771], [-8.8_236, -8.8_236, -8.6_771], [-8.7_638, -8.7_638, -8.6_240]] )
elif model_name == "upernet-convnext-base":
A_ = torch.tensor(
[[-8.8_558, -8.8_558, -8.6_905], [-8.8_558, -8.8_558, -8.6_905], [-8.7_669, -8.7_669, -8.6_021]] )
elif model_name == "upernet-convnext-large":
A_ = torch.tensor(
[[-8.6_660, -8.6_660, -8.6_210], [-8.6_660, -8.6_660, -8.6_210], [-8.6_310, -8.6_310, -8.5_964]] )
elif model_name == "upernet-convnext-xlarge":
A_ = torch.tensor(
[[-8.4_980, -8.4_980, -8.3_977], [-8.4_980, -8.4_980, -8.3_977], [-8.4_379, -8.4_379, -8.3_412]] )
print("""Logits:""", outputs.logits[0, 0, :3, :3] )
assert torch.allclose(outputs.logits[0, 0, :3, :3], _A, atol=1e-4 )
print("""Looks ok!""" )
if pytorch_dump_folder_path is not None:
print(F'''Saving model {model_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(_A )
print(F'''Saving processor to {pytorch_dump_folder_path}''' )
processor.save_pretrained(_A )
if push_to_hub:
print(F'''Pushing model and processor for {model_name} to hub''' )
model.push_to_hub(F'''openmmlab/{model_name}''' )
processor.push_to_hub(F'''openmmlab/{model_name}''' )
if __name__ == "__main__":
__lowerCamelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''upernet-convnext-tiny''',
type=str,
choices=[f"""upernet-convnext-{size}""" for size in ['''tiny''', '''small''', '''base''', '''large''', '''xlarge''']],
help='''Name of the ConvNext UperNet model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
__lowerCamelCase = parser.parse_args()
convert_upernet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 705 |
'''simple docstring'''
import unittest
import numpy as np
import torch
from torch import nn
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModelWithProjection,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import KandinskyVaaPriorPipeline, PriorTransformer, UnCLIPScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import enable_full_determinism, skip_mps
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class A__ ( PipelineTesterMixin , unittest.TestCase ):
lowercase = KandinskyVaaPriorPipeline
lowercase = ["prompt"]
lowercase = ["prompt", "negative_prompt"]
lowercase = [
"num_images_per_prompt",
"generator",
"num_inference_steps",
"latents",
"negative_prompt",
"guidance_scale",
"output_type",
"return_dict",
]
lowercase = False
@property
def snake_case_ ( self ) -> Any:
'''simple docstring'''
return 32
@property
def snake_case_ ( self ) -> Optional[int]:
'''simple docstring'''
return 32
@property
def snake_case_ ( self ) -> Optional[Any]:
'''simple docstring'''
return self.time_input_dim
@property
def snake_case_ ( self ) -> str:
'''simple docstring'''
return self.time_input_dim * 4
@property
def snake_case_ ( self ) -> int:
'''simple docstring'''
return 100
@property
def snake_case_ ( self ) -> Optional[Any]:
'''simple docstring'''
A_ = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
return tokenizer
@property
def snake_case_ ( self ) -> Union[str, Any]:
'''simple docstring'''
torch.manual_seed(0 )
A_ = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
return CLIPTextModelWithProjection(UpperCamelCase__ )
@property
def snake_case_ ( self ) -> Tuple:
'''simple docstring'''
torch.manual_seed(0 )
A_ = {
"""num_attention_heads""": 2,
"""attention_head_dim""": 12,
"""embedding_dim""": self.text_embedder_hidden_size,
"""num_layers""": 1,
}
A_ = PriorTransformer(**UpperCamelCase__ )
        # clip_std and clip_mean are initialized to 0, so PriorTransformer.post_process_latents would always return 0 - set clip_std to 1 so it won't return 0
A_ = nn.Parameter(torch.ones(model.clip_std.shape ) )
return model
@property
def snake_case_ ( self ) -> str:
'''simple docstring'''
torch.manual_seed(0 )
A_ = CLIPVisionConfig(
hidden_size=self.text_embedder_hidden_size , image_size=224 , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_channels=3 , num_hidden_layers=5 , patch_size=14 , )
A_ = CLIPVisionModelWithProjection(UpperCamelCase__ )
return model
@property
def snake_case_ ( self ) -> str:
'''simple docstring'''
A_ = CLIPImageProcessor(
crop_size=224 , do_center_crop=UpperCamelCase__ , do_normalize=UpperCamelCase__ , do_resize=UpperCamelCase__ , image_mean=[0.48145466, 0.4578275, 0.40821073] , image_std=[0.26862954, 0.26130258, 0.27577711] , resample=3 , size=224 , )
return image_processor
def snake_case_ ( self ) -> Union[str, Any]:
'''simple docstring'''
A_ = self.dummy_prior
A_ = self.dummy_image_encoder
A_ = self.dummy_text_encoder
A_ = self.dummy_tokenizer
A_ = self.dummy_image_processor
A_ = UnCLIPScheduler(
variance_type="""fixed_small_log""" , prediction_type="""sample""" , num_train_timesteps=1000 , clip_sample=UpperCamelCase__ , clip_sample_range=10.0 , )
A_ = {
"""prior""": prior,
"""image_encoder""": image_encoder,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""scheduler""": scheduler,
"""image_processor""": image_processor,
}
return components
def snake_case_ ( self , UpperCamelCase__ , UpperCamelCase__=0 ) -> Optional[int]:
'''simple docstring'''
if str(UpperCamelCase__ ).startswith("""mps""" ):
A_ = torch.manual_seed(UpperCamelCase__ )
else:
A_ = torch.Generator(device=UpperCamelCase__ ).manual_seed(UpperCamelCase__ )
A_ = {
"""prompt""": """horse""",
"""generator""": generator,
"""guidance_scale""": 4.0,
"""num_inference_steps""": 2,
"""output_type""": """np""",
}
return inputs
def snake_case_ ( self ) -> List[str]:
'''simple docstring'''
A_ = """cpu"""
A_ = self.get_dummy_components()
A_ = self.pipeline_class(**UpperCamelCase__ )
A_ = pipe.to(UpperCamelCase__ )
pipe.set_progress_bar_config(disable=UpperCamelCase__ )
A_ = pipe(**self.get_dummy_inputs(UpperCamelCase__ ) )
A_ = output.image_embeds
A_ = pipe(
**self.get_dummy_inputs(UpperCamelCase__ ) , return_dict=UpperCamelCase__ , )[0]
A_ = image[0, -10:]
A_ = image_from_tuple[0, -10:]
assert image.shape == (1, 32)
A_ = np.array(
[-0.0532, 1.7120, 0.3656, -1.0852, -0.8946, -1.1756, 0.4348, 0.2482, 0.5146, -0.1156] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
@skip_mps
def snake_case_ ( self ) -> int:
'''simple docstring'''
A_ = torch_device == """cpu"""
A_ = True
A_ = False
self._test_inference_batch_single_identical(
test_max_difference=UpperCamelCase__ , relax_max_difference=UpperCamelCase__ , test_mean_pixel_difference=UpperCamelCase__ , )
@skip_mps
def snake_case_ ( self ) -> Union[str, Any]:
'''simple docstring'''
A_ = torch_device == """cpu"""
A_ = False
self._test_attention_slicing_forward_pass(
test_max_difference=UpperCamelCase__ , test_mean_pixel_difference=UpperCamelCase__ , )
| 667 | 0 |
'''simple docstring'''
import os
import sys
import warnings
from dataclasses import dataclass, field
from io import BytesIO
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
import numpy as np
import pyarrow as pa
from .. import config
from ..download.streaming_download_manager import xopen
from ..table import array_cast
from ..utils.file_utils import is_local_path
from ..utils.py_utils import first_non_null_value, no_op_if_value_is_null, string_to_dict
if TYPE_CHECKING:
import PIL.Image
from .features import FeatureType
__lowerCamelCase = None
__lowerCamelCase = '''<''' if sys.byteorder == '''little''' else '''>'''
# Origin: https://github.com/python-pillow/Pillow/blob/698951e19e19972aeed56df686868f1329981c12/src/PIL/Image.py#L3126 minus "|i1" which values are not preserved correctly when saving and loading an image
__lowerCamelCase = [
np.dtype('''|b1'''),
np.dtype('''|u1'''),
np.dtype('''<u2'''),
np.dtype('''>u2'''),
np.dtype('''<i2'''),
np.dtype('''>i2'''),
np.dtype('''<u4'''),
np.dtype('''>u4'''),
np.dtype('''<i4'''),
np.dtype('''>i4'''),
np.dtype('''<f4'''),
np.dtype('''>f4'''),
np.dtype('''<f8'''),
np.dtype('''>f8'''),
]
@dataclass
class A__ :
lowercase = True
lowercase = None
# Automatically constructed
lowercase = "PIL.Image.Image"
lowercase = pa.struct({"bytes": pa.binary(), "path": pa.string()} )
lowercase = field(default="Image" , init=__lowerCamelCase , repr=__lowerCamelCase )
def __call__( self ) -> Any:
'''simple docstring'''
return self.pa_type
def snake_case_ ( self , UpperCamelCase__ ) -> List[str]:
'''simple docstring'''
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError("""To support encoding images, please install \'Pillow\'.""" )
if isinstance(UpperCAmelCase_ , UpperCAmelCase_ ):
A_ = np.array(UpperCAmelCase_ )
if isinstance(UpperCAmelCase_ , UpperCAmelCase_ ):
return {"path": value, "bytes": None}
elif isinstance(UpperCAmelCase_ , UpperCAmelCase_ ):
return {"path": None, "bytes": value}
elif isinstance(UpperCAmelCase_ , np.ndarray ):
# convert the image array to PNG/TIFF bytes
return encode_np_array(UpperCAmelCase_ )
elif isinstance(UpperCAmelCase_ , PIL.Image.Image ):
# convert the PIL image to bytes (default format is PNG/TIFF)
return encode_pil_image(UpperCAmelCase_ )
elif value.get("""path""" ) is not None and os.path.isfile(value["""path"""] ):
# we set "bytes": None to not duplicate the data if they're already available locally
return {"bytes": None, "path": value.get("""path""" )}
elif value.get("""bytes""" ) is not None or value.get("""path""" ) is not None:
# store the image bytes, and path is used to infer the image format using the file extension
return {"bytes": value.get("""bytes""" ), "path": value.get("""path""" )}
else:
raise ValueError(
f'''An image sample should have one of \'path\' or \'bytes\' but they are missing or None in {value}.''' )
def snake_case_ ( self , UpperCamelCase__ , UpperCamelCase__=None ) -> List[str]:
'''simple docstring'''
if not self.decode:
raise RuntimeError("""Decoding is disabled for this feature. Please use Image(decode=True) instead.""" )
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError("""To support decoding images, please install \'Pillow\'.""" )
if token_per_repo_id is None:
A_ = {}
A_ = value['path'], value['bytes']
if bytes_ is None:
if path is None:
raise ValueError(f'''An image should have one of \'path\' or \'bytes\' but both are None in {value}.''' )
else:
if is_local_path(UpperCAmelCase_ ):
A_ = PIL.Image.open(UpperCAmelCase_ )
else:
A_ = path.split("""::""" )[-1]
try:
A_ = string_to_dict(UpperCAmelCase_ , config.HUB_DATASETS_URL )['repo_id']
A_ = token_per_repo_id.get(UpperCAmelCase_ )
except ValueError:
A_ = None
with xopen(UpperCAmelCase_ , """rb""" , use_auth_token=UpperCAmelCase_ ) as f:
A_ = BytesIO(f.read() )
A_ = PIL.Image.open(bytes_ )
else:
A_ = PIL.Image.open(BytesIO(bytes_ ) )
image.load() # to avoid "Too many open files" errors
return image
def snake_case_ ( self ) -> int:
'''simple docstring'''
from .features import Value
return (
self
if self.decode
else {
"bytes": Value("""binary""" ),
"path": Value("""string""" ),
}
)
def snake_case_ ( self , UpperCamelCase__ ) -> Union[str, Any]:
'''simple docstring'''
if pa.types.is_string(storage.type ):
A_ = pa.array([None] * len(UpperCAmelCase_ ) , type=pa.binary() )
A_ = pa.StructArray.from_arrays([bytes_array, storage] , ["""bytes""", """path"""] , mask=storage.is_null() )
elif pa.types.is_binary(storage.type ):
A_ = pa.array([None] * len(UpperCAmelCase_ ) , type=pa.string() )
A_ = pa.StructArray.from_arrays([storage, path_array] , ["""bytes""", """path"""] , mask=storage.is_null() )
elif pa.types.is_struct(storage.type ):
if storage.type.get_field_index("""bytes""" ) >= 0:
A_ = storage.field("""bytes""" )
else:
A_ = pa.array([None] * len(UpperCAmelCase_ ) , type=pa.binary() )
if storage.type.get_field_index("""path""" ) >= 0:
A_ = storage.field("""path""" )
else:
A_ = pa.array([None] * len(UpperCAmelCase_ ) , type=pa.string() )
A_ = pa.StructArray.from_arrays([bytes_array, path_array] , ["""bytes""", """path"""] , mask=storage.is_null() )
elif pa.types.is_list(storage.type ):
A_ = pa.array(
[encode_np_array(np.array(UpperCAmelCase_ ) )["""bytes"""] if arr is not None else None for arr in storage.to_pylist()] , type=pa.binary() , )
A_ = pa.array([None] * len(UpperCAmelCase_ ) , type=pa.string() )
A_ = pa.StructArray.from_arrays(
[bytes_array, path_array] , ["""bytes""", """path"""] , mask=bytes_array.is_null() )
return array_cast(UpperCAmelCase_ , self.pa_type )
def snake_case_ ( self , UpperCamelCase__ ) -> str:
'''simple docstring'''
@no_op_if_value_is_null
def path_to_bytes(UpperCamelCase__ ):
with xopen(UpperCAmelCase_ , """rb""" ) as f:
A_ = f.read()
return bytes_
A_ = pa.array(
[
(path_to_bytes(x["""path"""] ) if x["""bytes"""] is None else x["""bytes"""]) if x is not None else None
for x in storage.to_pylist()
] , type=pa.binary() , )
A_ = pa.array(
[os.path.basename(UpperCAmelCase_ ) if path is not None else None for path in storage.field("""path""" ).to_pylist()] , type=pa.string() , )
A_ = pa.StructArray.from_arrays([bytes_array, path_array] , ["""bytes""", """path"""] , mask=bytes_array.is_null() )
return array_cast(UpperCAmelCase_ , self.pa_type )
def UpperCAmelCase__ ( ) -> List[str]:
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError("""To support encoding images, please install \'Pillow\'.""" )
global _IMAGE_COMPRESSION_FORMATS
if _IMAGE_COMPRESSION_FORMATS is None:
PIL.Image.init()
A_ = list(set(PIL.Image.OPEN.keys() ) & set(PIL.Image.SAVE.keys() ) )
return _IMAGE_COMPRESSION_FORMATS
def UpperCAmelCase__ ( UpperCAmelCase__ ) -> bytes:
A_ = BytesIO()
if image.format in list_image_compression_formats():
A_ = image.format
else:
A_ = 'PNG' if image.mode in ['1', 'L', 'LA', 'RGB', 'RGBA'] else 'TIFF'
image.save(lowerCamelCase_, format=lowerCamelCase_ )
return buffer.getvalue()
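
# Illustration: the fallback above means a JPEG opened from disk round-trips as
# JPEG (its `image.format` is registered for both OPEN and SAVE), an in-memory
# image such as PIL.Image.new("RGB", (4, 4)) has format=None and is serialized
# as PNG, and a mode like "I;16" falls through to TIFF.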
def UpperCAmelCase__ ( UpperCAmelCase__ ) -> dict:
if hasattr(lowerCamelCase_, """filename""" ) and image.filename != "":
return {"path": image.filename, "bytes": None}
else:
return {"path": None, "bytes": image_to_bytes(lowerCamelCase_ )}
def UpperCAmelCase__ ( UpperCAmelCase__ ) -> dict:
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError("""To support encoding images, please install \'Pillow\'.""" )
A_ = array.dtype
A_ = dtype.byteorder if dtype.byteorder != '=' else _NATIVE_BYTEORDER
A_ = dtype.kind
A_ = dtype.itemsize
A_ = None
# Multi-channel array case (only np.dtype("|u1") is allowed)
if array.shape[2:]:
A_ = np.dtype("""|u1""" )
if dtype_kind not in ["u", "i"]:
raise TypeError(
F'''Unsupported array dtype {dtype} for image encoding. Only {dest_dtype} is supported for multi-channel arrays.''' )
if dtype is not dest_dtype:
warnings.warn(F'''Downcasting array dtype {dtype} to {dest_dtype} to be compatible with \'Pillow\'''' )
# Exact match
elif dtype in _VALID_IMAGE_ARRAY_DTPYES:
A_ = dtype
else: # Downcast the type within the kind (np.can_cast(from_type, to_type, casting="same_kind") doesn't behave as expected, so do it manually)
while dtype_itemsize >= 1:
A_ = dtype_byteorder + dtype_kind + str(lowerCamelCase_ )
A_ = np.dtype(lowerCamelCase_ )
if dest_dtype in _VALID_IMAGE_ARRAY_DTPYES:
warnings.warn(F'''Downcasting array dtype {dtype} to {dest_dtype} to be compatible with \'Pillow\'''' )
break
else:
dtype_itemsize //= 2
if dest_dtype is None:
raise TypeError(
F'''Cannot convert dtype {dtype} to a valid image dtype. Valid image dtypes: {_VALID_IMAGE_ARRAY_DTPYES}''' )
A_ = PIL.Image.fromarray(array.astype(lowerCamelCase_ ) )
return {"path": None, "bytes": image_to_bytes(lowerCamelCase_ )}
def UpperCAmelCase__ ( UpperCAmelCase__ ) -> List[dict]:
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError("""To support encoding images, please install \'Pillow\'.""" )
if objs:
A_ = first_non_null_value(lowerCamelCase_ )
if isinstance(lowerCamelCase_, lowerCamelCase_ ):
return [{"path": obj, "bytes": None} if obj is not None else None for obj in objs]
if isinstance(lowerCamelCase_, np.ndarray ):
A_ = no_op_if_value_is_null(lowerCamelCase_ )
return [obj_to_image_dict_func(lowerCamelCase_ ) for obj in objs]
elif isinstance(lowerCamelCase_, PIL.Image.Image ):
A_ = no_op_if_value_is_null(lowerCamelCase_ )
return [obj_to_image_dict_func(lowerCamelCase_ ) for obj in objs]
else:
return objs
else:
return objs
| 706 |
'''simple docstring'''
import tempfile
import torch
from diffusers import IPNDMScheduler
from .test_schedulers import SchedulerCommonTest
class A__ ( SchedulerCommonTest ):
lowercase = (IPNDMScheduler,)
lowercase = (("num_inference_steps", 50),)
def snake_case_ ( self , **UpperCamelCase__ ) -> Optional[Any]:
'''simple docstring'''
A_ = {"""num_train_timesteps""": 1000}
config.update(**UpperCamelCase__ )
return config
def snake_case_ ( self , UpperCamelCase__=0 , **UpperCamelCase__ ) -> str:
'''simple docstring'''
A_ = dict(self.forward_default_kwargs )
A_ = kwargs.pop("""num_inference_steps""" , UpperCamelCase__ )
A_ = self.dummy_sample
A_ = 0.1 * sample
A_ = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
A_ = self.get_scheduler_config(**UpperCamelCase__ )
A_ = scheduler_class(**UpperCamelCase__ )
scheduler.set_timesteps(UpperCamelCase__ )
# copy over dummy past residuals
A_ = dummy_past_residuals[:]
if time_step is None:
A_ = scheduler.timesteps[len(scheduler.timesteps ) // 2]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(UpperCamelCase__ )
A_ = scheduler_class.from_pretrained(UpperCamelCase__ )
new_scheduler.set_timesteps(UpperCamelCase__ )
# copy over dummy past residuals
A_ = dummy_past_residuals[:]
A_ = scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__ ).prev_sample
A_ = new_scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
A_ = scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__ ).prev_sample
A_ = new_scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
def snake_case_ ( self ) -> Optional[int]:
'''simple docstring'''
pass
def snake_case_ ( self , UpperCamelCase__=0 , **UpperCamelCase__ ) -> str:
'''simple docstring'''
A_ = dict(self.forward_default_kwargs )
A_ = kwargs.pop("""num_inference_steps""" , UpperCamelCase__ )
A_ = self.dummy_sample
A_ = 0.1 * sample
A_ = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
A_ = self.get_scheduler_config()
A_ = scheduler_class(**UpperCamelCase__ )
scheduler.set_timesteps(UpperCamelCase__ )
# copy over dummy past residuals (must be after setting timesteps)
A_ = dummy_past_residuals[:]
if time_step is None:
A_ = scheduler.timesteps[len(scheduler.timesteps ) // 2]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(UpperCamelCase__ )
A_ = scheduler_class.from_pretrained(UpperCamelCase__ )
# copy over dummy past residuals
new_scheduler.set_timesteps(UpperCamelCase__ )
# copy over dummy past residual (must be after setting timesteps)
A_ = dummy_past_residuals[:]
A_ = scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__ ).prev_sample
A_ = new_scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
A_ = scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__ ).prev_sample
A_ = new_scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
def snake_case_ ( self , **UpperCamelCase__ ) -> Tuple:
'''simple docstring'''
A_ = self.scheduler_classes[0]
A_ = self.get_scheduler_config(**UpperCamelCase__ )
A_ = scheduler_class(**UpperCamelCase__ )
A_ = 10
A_ = self.dummy_model()
A_ = self.dummy_sample_deter
scheduler.set_timesteps(UpperCamelCase__ )
for i, t in enumerate(scheduler.timesteps ):
A_ = model(UpperCamelCase__ , UpperCamelCase__ )
A_ = scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ).prev_sample
for i, t in enumerate(scheduler.timesteps ):
A_ = model(UpperCamelCase__ , UpperCamelCase__ )
A_ = scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ).prev_sample
return sample
def snake_case_ ( self ) -> Union[str, Any]:
'''simple docstring'''
A_ = dict(self.forward_default_kwargs )
A_ = kwargs.pop("""num_inference_steps""" , UpperCamelCase__ )
for scheduler_class in self.scheduler_classes:
A_ = self.get_scheduler_config()
A_ = scheduler_class(**UpperCamelCase__ )
A_ = self.dummy_sample
A_ = 0.1 * sample
if num_inference_steps is not None and hasattr(UpperCamelCase__ , """set_timesteps""" ):
scheduler.set_timesteps(UpperCamelCase__ )
elif num_inference_steps is not None and not hasattr(UpperCamelCase__ , """set_timesteps""" ):
A_ = num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
A_ = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
A_ = dummy_past_residuals[:]
A_ = scheduler.timesteps[5]
A_ = scheduler.timesteps[6]
A_ = scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__ ).prev_sample
A_ = scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__ ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
A_ = scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__ ).prev_sample
A_ = scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__ ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
def snake_case_ ( self ) -> Any:
'''simple docstring'''
for timesteps in [100, 1000]:
self.check_over_configs(num_train_timesteps=UpperCamelCase__ , time_step=UpperCamelCase__ )
def snake_case_ ( self ) -> Any:
'''simple docstring'''
for t, num_inference_steps in zip([1, 5, 10] , [10, 50, 100] ):
self.check_over_forward(num_inference_steps=UpperCamelCase__ , time_step=UpperCamelCase__ )
def snake_case_ ( self ) -> Dict:
'''simple docstring'''
A_ = self.full_loop()
A_ = torch.mean(torch.abs(UpperCamelCase__ ) )
assert abs(result_mean.item() - 2540529 ) < 10
| 667 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
__lowerCamelCase = {
"configuration_poolformer": [
"POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
"PoolFormerConfig",
"PoolFormerOnnxConfig",
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase = ["PoolFormerFeatureExtractor"]
__lowerCamelCase = ["PoolFormerImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase = [
"POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"PoolFormerForImageClassification",
"PoolFormerModel",
"PoolFormerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_poolformer import (
POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
PoolFormerConfig,
PoolFormerOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_poolformer import PoolFormerFeatureExtractor
from .image_processing_poolformer import PoolFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_poolformer import (
POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
PoolFormerForImageClassification,
PoolFormerModel,
PoolFormerPreTrainedModel,
)
else:
import sys
__lowerCamelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
| 707 |
'''simple docstring'''
import argparse
import re
from flax.traverse_util import flatten_dict, unflatten_dict
from tax import checkpoints
from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration
from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model
from transformers.utils import logging
logging.set_verbosity_info()
# should not include what is already done by the `from_pt` argument
__lowerCamelCase = {
'''/attention/''': '''/0/SelfAttention/''',
'''/self_attention/''': '''/0/SelfAttention/''',
'''/encoder_decoder_attention/''': '''/1/EncDecAttention/''',
'''value''': '''v''',
'''query''': '''q''',
'''key''': '''k''',
'''out''': '''o''',
'''pre_self_attention_layer_norm''': '''0/layer_norm''',
'''pre_cross_attention_layer_norm''': '''1/layer_norm''',
'''pre_attention_layer_norm''': '''0/layer_norm''', # previously 1, but seems wrong
'''token_embedder''': '''shared''',
'''encoder_norm''': '''final_layer_norm''',
'''decoder_norm''': '''final_layer_norm''',
'''relpos_bias/rel_embedding''': '''block/0/layer/0/SelfAttention/relative_attention_bias/weight''',
'''router/router_weights/w/''': '''router/classifier/''',
'''roer/roer_weights/w/''': '''router/classifier/''',
'''logits_dense''': '''lm_head''',
}
def UpperCAmelCase__ ( UpperCAmelCase__ ) -> Tuple:
# 1. in HF T5, we have block.{x}.layer.{y}. which corresponds to layer.{x} in
# the original model
A_ = list(s_dict.keys() )
for key in keys:
A_ = r""".*/layers_(\d+)"""
A_ = key
if re.match(UpperCAmelCase__, UpperCAmelCase__ ):
A_ = re.sub(r"""layers_(\d+)""", r"""block/\1/layer""", UpperCAmelCase__ )
A_ = r"""(encoder|decoder)\/"""
if re.match(UpperCAmelCase__, UpperCAmelCase__ ):
A_ = re.match(UpperCAmelCase__, UpperCAmelCase__ ).groups()
if groups[0] == "encoder":
A_ = re.sub(r"""/mlp/""", r"""/1/mlp/""", UpperCAmelCase__ )
A_ = re.sub(r"""/pre_mlp_layer_norm/""", r"""/1/layer_norm/""", UpperCAmelCase__ )
elif groups[0] == "decoder":
A_ = re.sub(r"""/mlp/""", r"""/2/mlp/""", UpperCAmelCase__ )
A_ = re.sub(r"""/pre_mlp_layer_norm/""", r"""/2/layer_norm/""", UpperCAmelCase__ )
# 2. Convert other classic mappings
for old_key, temp_key in MOE_LAYER_NAME_MAPPING.items():
if old_key in new_key:
A_ = new_key.replace(UpperCAmelCase__, UpperCAmelCase__ )
print(F'''{key} -> {new_key}''' )
A_ = s_dict.pop(UpperCAmelCase__ )
if "encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict:
A_ = s_dict[
"""encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"""
].T
if "decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict:
A_ = s_dict[
"""decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"""
].T
# 3. Take extra care of the EXPERTS layer
for key in list(s_dict.keys() ):
if "expert" in key:
A_ = s_dict[key].shape[0]
A_ = s_dict[key]
for idx in range(UpperCAmelCase__ ):
A_ = expert_weihts[idx]
print(F'''{key} -> {key.replace("expert/", "nested fstring" )}''' )
s_dict.pop(UpperCAmelCase__ )
return s_dict
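
# Illustration (hedged): a typical transformation performed above, e.g.
#   "encoder/layers_0/attention/query/kernel"
#     -> "encoder/block/0/layer/0/SelfAttention/q/kernel"
# first via the `layers_(\d+)` regex, then the encoder/decoder-specific
# substitutions, then the classic MOE_LAYER_NAME_MAPPING replacements.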
__lowerCamelCase = {
'''NUM_ENCODER_LAYERS''': '''num_layers''',
'''NUM_DECODER_LAYERS''': '''num_decoder_layers''',
'''NUM_HEADS''': '''num_heads''',
'''HEAD_DIM''': '''d_kv''',
'''EMBED_DIM''': '''d_model''',
'''MLP_DIM''': '''d_ff''',
'''NUM_SELECTED_EXPERTS''': '''num_selected_experts''',
'''NUM_ENCODER_SPARSE_LAYERS''': '''num_sparse_encoder_layers''',
'''NUM_DECODER_SPARSE_LAYERS''': '''num_sparse_decoder_layers''',
'''dense.MlpBlock.activations''': '''feed_forward_proj''',
}
def UpperCAmelCase__ ( UpperCAmelCase__, UpperCAmelCase__ ) -> List[Any]:
    # Convert a Google-style config to the Hugging Face format
import regex as re
with open(UpperCAmelCase__, """r""" ) as f:
A_ = f.read()
A_ = re.findall(r"""(.*) = ([0-9.]*)""", UpperCAmelCase__ )
A_ = {}
for param, value in regex_match:
if param in GIN_TO_CONFIG_MAPPING and value != "":
A_ = float(UpperCAmelCase__ ) if """.""" in value else int(UpperCAmelCase__ )
A_ = re.findall(r"""(.*activations) = \(\'(.*)\',\)""", UpperCAmelCase__ )[0]
A_ = str(activation[1] )
A_ = num_experts
A_ = SwitchTransformersConfig(**UpperCAmelCase__ )
return config
def UpperCAmelCase__ ( UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__=None, UpperCAmelCase__="./", UpperCAmelCase__=8 ) -> List[str]:
# Initialise PyTorch model
print(F'''Loading flax weights from : {flax_checkpoint_path}''' )
A_ = checkpoints.load_tax_checkpoint(UpperCAmelCase__ )
if gin_file is not None:
A_ = convert_gin_to_config(UpperCAmelCase__, UpperCAmelCase__ )
else:
A_ = SwitchTransformersConfig.from_pretrained(UpperCAmelCase__ )
A_ = SwitchTransformersForConditionalGeneration(UpperCAmelCase__ )
A_ = flax_params["""target"""]
A_ = flatten_dict(UpperCAmelCase__, sep="""/""" )
A_ = rename_keys(UpperCAmelCase__ )
A_ = unflatten_dict(UpperCAmelCase__, sep="""/""" )
# Load the flax params in the PT model
load_flax_weights_in_pytorch_model(UpperCAmelCase__, UpperCAmelCase__ )
print(F'''Save PyTorch model to {pytorch_dump_path}''' )
pt_model.save_pretrained(UpperCAmelCase__ )
if __name__ == "__main__":
__lowerCamelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--switch_t5x_checkpoint_path''',
default=None,
type=str,
required=True,
help=(
'''The config json file corresponding to the pre-trained SwitchTransformers model. \nThis specifies the'''
''' model architecture. If not provided, a `gin_file` has to be provided.'''
),
)
parser.add_argument(
'''--gin_file''',
default=None,
type=str,
required=False,
help='''Path to the gin config file. If not provided, a `config_file` has to be passed ''',
)
parser.add_argument(
'''--config_name''', default=None, type=str, required=False, help='''Config name of SwitchTransformers model.'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output pytorch model.'''
)
parser.add_argument('''--num_experts''', default=8, type=int, required=False, help='''Number of experts''')
__lowerCamelCase = parser.parse_args()
convert_flax_checkpoint_to_pytorch(
args.switch_tax_checkpoint_path,
args.config_name,
args.gin_file,
args.pytorch_dump_folder_path,
args.num_experts,
)
| 667 | 0 |
'''simple docstring'''
from PIL import Image
def change_brightness(img: Image, level: float) -> Image:
    def brightness(c: int) -> float:
        return 1_28 + level + (c - 1_28)

    if not -255.0 <= level <= 255.0:
        raise ValueError("""level must be between -255.0 (black) and 255.0 (white)""" )
    return img.point(brightness )
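
# Illustration: the point-wise map simplifies to c + level (the two 128 offsets
# cancel), so level=100 sends a pixel value of 200 to 300, which PIL clamps into
# the valid 0-255 range when building the 8-bit output bands.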
if __name__ == "__main__":
# Load image
with Image.open('''image_data/lena.jpg''') as img:
# Change brightness to 100
__lowerCamelCase = change_brightness(img, 100)
        __lowerCamelCase.save('''image_data/lena_brightness.png''', format='''png''')
| 708 |
'''simple docstring'''
def UpperCAmelCase__ ( number_of_steps ) -> int:
    assert (
        isinstance(number_of_steps, int ) and number_of_steps > 0
    ), F'''number_of_steps needs to be a positive integer, your input {number_of_steps}'''
    if number_of_steps == 1:
        return 1
    previous , current = 1, 1
    for _ in range(number_of_steps - 1 ):
        current , previous = current + previous, current
    return current
if __name__ == "__main__":
import doctest
doctest.testmod()
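# Illustration: the loop above is the iterative Fibonacci recurrence for the
# classic climbing-stairs count (steps of size 1 or 2): an input of 3 yields 3
# (1+1+1, 1+2, 2+1) and an input of 5 yields 8.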
| 667 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
__lowerCamelCase = {
'configuration_gpt_neo': ['GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP', 'GPTNeoConfig', 'GPTNeoOnnxConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase = [
'GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST',
'GPTNeoForCausalLM',
'GPTNeoForQuestionAnswering',
'GPTNeoForSequenceClassification',
'GPTNeoForTokenClassification',
'GPTNeoModel',
'GPTNeoPreTrainedModel',
'load_tf_weights_in_gpt_neo',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase = [
'FlaxGPTNeoForCausalLM',
'FlaxGPTNeoModel',
'FlaxGPTNeoPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_gpt_neo import GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoConfig, GPTNeoOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neo import (
GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoForCausalLM,
GPTNeoForQuestionAnswering,
GPTNeoForSequenceClassification,
GPTNeoForTokenClassification,
GPTNeoModel,
GPTNeoPreTrainedModel,
load_tf_weights_in_gpt_neo,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_gpt_neo import FlaxGPTNeoForCausalLM, FlaxGPTNeoModel, FlaxGPTNeoPreTrainedModel
else:
import sys
__lowerCamelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 709 |
'''simple docstring'''
def is_palindrome(num ) -> bool:
    return str(num ) == str(num )[::-1]


def sum_reverse(num ) -> int:
    return int(num ) + int(str(num )[::-1] )


def solution(limit = 1_00_00 ) -> int:
    lychrel_nums = []
    for num in range(1, limit ):
        iterations = 0
        current = num
        while iterations < 50:
            current = sum_reverse(current )
            iterations += 1
            if is_palindrome(current ):
                break
        else:
            lychrel_nums.append(num )
    return len(lychrel_nums )
if __name__ == "__main__":
print(f"""{solution() = }""")
| 667 | 0 |
'''simple docstring'''
import numpy as np
import torch
from torch.utils.data import Dataset, IterableDataset
from ..utils.generic import ModelOutput
class A__ ( Dataset ):
def __init__( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) -> Tuple:
'''simple docstring'''
A_ = dataset
A_ = process
A_ = params
def __len__( self ) -> Any:
'''simple docstring'''
return len(self.dataset )
def __getitem__( self , UpperCamelCase__ ) -> Dict:
'''simple docstring'''
A_ = self.dataset[i]
A_ = self.process(UpperCamelCase_ , **self.params )
return processed
class A__ ( IterableDataset ):
def __init__( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__=None ) -> List[str]:
'''simple docstring'''
A_ = loader
A_ = infer
A_ = params
if loader_batch_size == 1:
# Let's spare some time by deactivating altogether
A_ = None
A_ = loader_batch_size
# Internal bookkeeping
A_ = None
A_ = None
def __len__( self ) -> List[Any]:
'''simple docstring'''
return len(self.loader )
def __iter__( self ) -> str:
'''simple docstring'''
A_ = iter(self.loader )
return self
def snake_case_ ( self ) -> Optional[int]:
'''simple docstring'''
if isinstance(self._loader_batch_data , torch.Tensor ):
# Batch data is simple tensor, just fetch the slice
A_ = self._loader_batch_data[self._loader_batch_index]
else:
# Batch data is assumed to be BaseModelOutput (or dict)
A_ = {}
for k, element in self._loader_batch_data.items():
if isinstance(UpperCamelCase_ , UpperCamelCase_ ):
# Convert ModelOutput to tuple first
A_ = element.to_tuple()
if isinstance(element[0] , torch.Tensor ):
A_ = tuple(el[self._loader_batch_index].unsqueeze(0 ) for el in element )
elif isinstance(element[0] , np.ndarray ):
A_ = tuple(np.expand_dims(el[self._loader_batch_index] , 0 ) for el in element )
continue
if k in {"hidden_states", "past_key_values", "attentions"} and isinstance(UpperCamelCase_ , UpperCamelCase_ ):
# Those are stored as lists of tensors so need specific unbatching.
if isinstance(element[0] , torch.Tensor ):
A_ = tuple(el[self._loader_batch_index].unsqueeze(0 ) for el in element )
elif isinstance(element[0] , np.ndarray ):
A_ = tuple(np.expand_dims(el[self._loader_batch_index] , 0 ) for el in element )
continue
if element is None:
# This can happen for optional data that get passed around
A_ = None
elif isinstance(element[self._loader_batch_index] , torch.Tensor ):
                    # Take correct batch data, but make it look like batch_size=1
# For compatibility with other methods within transformers
A_ = element[self._loader_batch_index].unsqueeze(0 )
elif isinstance(element[self._loader_batch_index] , np.ndarray ):
                    # Take correct batch data, but make it look like batch_size=1
# For compatibility with other methods within transformers
A_ = np.expand_dims(element[self._loader_batch_index] , 0 )
else:
# This is typically a list, so no need to `unsqueeze`.
A_ = element[self._loader_batch_index]
# Recreate the element by reusing the original class to make it look
# batch_size=1
A_ = self._loader_batch_data.__class__(UpperCamelCase_ )
self._loader_batch_index += 1
return result
def snake_case_ ( self ) -> List[str]:
'''simple docstring'''
if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size:
# We are currently unrolling a batch so we just need to return
# the current item within a batch
return self.loader_batch_item()
# We're out of items within a batch
A_ = next(self.iterator )
A_ = self.infer(UpperCamelCase_ , **self.params )
# We now have a batch of "inferred things".
if self.loader_batch_size is not None:
# Try to infer the size of the batch
if isinstance(UpperCamelCase_ , torch.Tensor ):
A_ = processed
else:
A_ = list(processed.keys() )[0]
A_ = processed[key]
if isinstance(UpperCamelCase_ , UpperCamelCase_ ):
A_ = len(UpperCamelCase_ )
else:
A_ = first_tensor.shape[0]
if 0 < observed_batch_size < self.loader_batch_size:
# could be last batch so we can't unroll as many
# elements.
A_ = observed_batch_size
# Setting internal index to unwrap the batch
A_ = processed
A_ = 0
return self.loader_batch_item()
else:
# We're not unrolling batches
return processed
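
# Illustration: with loader_batch_size=4 and a dataset of 10 items, the first
# two inferred batches unroll into four batch_size=1 items each, while the last
# batch reports first_tensor.shape[0] == 2, so observed_batch_size caps the
# unrolling at two items instead of four.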
class A__ ( _snake_case ):
def __init__( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__=None ) -> Dict:
'''simple docstring'''
super().__init__(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
def __iter__( self ) -> int:
'''simple docstring'''
A_ = iter(self.loader )
A_ = None
return self
def snake_case_ ( self ) -> Dict:
'''simple docstring'''
if self.subiterator is None:
A_ = self.infer(next(self.iterator ) , **self.params )
try:
# Try to return next item
A_ = next(self.subiterator )
except StopIteration:
            # When a preprocess iterator ends, we can start looking at the next item
# ChunkIterator will keep feeding until ALL elements of iterator
# all have created their subiterator and have been iterating against.
#
# Another way to look at it, is we're basically flattening lists of lists
# into a single list, but with generators
A_ = self.infer(next(self.iterator ) , **self.params )
A_ = next(self.subiterator )
return processed
class A__ ( _snake_case ):
def __iter__( self ) -> Dict:
'''simple docstring'''
A_ = iter(self.loader )
return self
def snake_case_ ( self ) -> Optional[int]:
'''simple docstring'''
# Extremely similar to PipelineIterator in its unpacking mechanism
# BUT, we have an extra required item which is the presence of `is_last`
# That is because everything is flattened by `PipelineChunkIterator` we
# need to keep track of how to regroup here in the original `process`
# boundaries so that `process` and `postprocess` see the same data.
# This iterator accumulates items (possibly while unbatching) until it
        # hits an `is_last` and then just passes it on to the caller.
A_ = False
A_ = []
if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size:
while self._loader_batch_index < self.loader_batch_size:
A_ = self.loader_batch_item()
A_ = item.pop("""is_last""" )
accumulator.append(UpperCamelCase_ )
if is_last:
return accumulator
while not is_last:
A_ = self.infer(next(self.iterator ) , **self.params )
if self.loader_batch_size is not None:
if isinstance(UpperCamelCase_ , torch.Tensor ):
A_ = processed
else:
A_ = list(processed.keys() )[0]
A_ = processed[key]
if isinstance(UpperCamelCase_ , UpperCamelCase_ ):
A_ = len(UpperCamelCase_ )
else:
A_ = first_tensor.shape[0]
if 0 < observed_batch_size < self.loader_batch_size:
# could be last batch so we can't unroll as many
# elements.
A_ = observed_batch_size
A_ = processed
A_ = 0
while self._loader_batch_index < self.loader_batch_size:
A_ = self.loader_batch_item()
A_ = item.pop("""is_last""" )
accumulator.append(UpperCamelCase_ )
if is_last:
return accumulator
else:
A_ = processed
A_ = item.pop("""is_last""" )
accumulator.append(UpperCamelCase_ )
return accumulator
class A__ ( Dataset ):
def __init__( self , UpperCamelCase__ , UpperCamelCase__ ) -> Tuple:
'''simple docstring'''
A_ = dataset
A_ = key
def __len__( self ) -> Optional[int]:
'''simple docstring'''
return len(self.dataset )
def __getitem__( self , UpperCamelCase__ ) -> int:
'''simple docstring'''
return self.dataset[i][self.key]
class A__ ( Dataset ):
def __init__( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) -> List[str]:
'''simple docstring'''
A_ = dataset
A_ = keya
A_ = keya
def __len__( self ) -> Optional[Any]:
'''simple docstring'''
return len(self.dataset )
def __getitem__( self , UpperCamelCase__ ) -> Any:
'''simple docstring'''
return {"text": self.dataset[i][self.keya], "text_pair": self.dataset[i][self.keya]}
| 710 |
'''simple docstring'''
import argparse
import json
from typing import List
from ltp import LTP
from transformers.models.bert.tokenization_bert import BertTokenizer
def UpperCAmelCase__ ( UpperCAmelCase__ ) -> Optional[int]:
# This defines a "chinese character" as anything in the CJK Unicode block:
# https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
#
# Note that the CJK Unicode block is NOT all Japanese and Korean characters,
# despite its name. The modern Korean Hangul alphabet is a different block,
# as is Japanese Hiragana and Katakana. Those alphabets are used to write
# space-separated words, so they are not treated specially and handled
    # like all of the other languages.
if (
(cp >= 0X4E_00 and cp <= 0X9F_FF)
or (cp >= 0X34_00 and cp <= 0X4D_BF) #
or (cp >= 0X2_00_00 and cp <= 0X2_A6_DF) #
or (cp >= 0X2_A7_00 and cp <= 0X2_B7_3F) #
or (cp >= 0X2_B7_40 and cp <= 0X2_B8_1F) #
or (cp >= 0X2_B8_20 and cp <= 0X2_CE_AF) #
or (cp >= 0XF9_00 and cp <= 0XFA_FF)
or (cp >= 0X2_F8_00 and cp <= 0X2_FA_1F) #
): #
return True
return False
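
# Illustration: ord("中") == 0x4E2D lies in the base CJK block above and returns
# True, while ord("あ") == 0x3042 (Hiragana) and ord("한") == 0xD55C (Hangul)
# fall outside every listed range and return False.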
def UpperCAmelCase__ ( UpperCAmelCase__ ) -> List[Any]:
# word like '180' or '身高' or '神'
for char in word:
A_ = ord(UpperCAmelCase__ )
if not _is_chinese_char(UpperCAmelCase__ ):
return 0
return 1
def UpperCAmelCase__ ( UpperCAmelCase__ ) -> int:
A_ = set()
for token in tokens:
A_ = len(UpperCAmelCase__ ) > 1 and is_chinese(UpperCAmelCase__ )
if chinese_word:
word_set.add(UpperCAmelCase__ )
A_ = list(UpperCAmelCase__ )
return word_list
def UpperCAmelCase__ ( UpperCAmelCase__, UpperCAmelCase__ ) -> Dict:
if not chinese_word_set:
return bert_tokens
A_ = max([len(UpperCAmelCase__ ) for w in chinese_word_set] )
A_ = bert_tokens
A_ , A_ = 0, len(UpperCAmelCase__ )
while start < end:
A_ = True
if is_chinese(bert_word[start] ):
A_ = min(end - start, UpperCAmelCase__ )
for i in range(UpperCAmelCase__, 1, -1 ):
A_ = """""".join(bert_word[start : start + i] )
if whole_word in chinese_word_set:
for j in range(start + 1, start + i ):
A_ = """##""" + bert_word[j]
A_ = start + i
A_ = False
break
if single_word:
start += 1
return bert_word
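
# Illustration: with bert_tokens=["中", "国", "人"] and chinese_word_set={"中国"},
# the scan above matches the two-character word at position 0 and rewrites the
# list to ["中", "##国", "人"], marking the continuation piece for whole-word masking.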
def UpperCAmelCase__ ( UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__ ) -> Dict:
A_ = []
for i in range(0, len(UpperCAmelCase__ ), 1_00 ):
A_ = ltp_tokenizer.pipeline(lines[i : i + 1_00], tasks=["""cws"""] ).cws
A_ = [get_chinese_word(UpperCAmelCase__ ) for r in res]
ltp_res.extend(UpperCAmelCase__ )
assert len(UpperCAmelCase__ ) == len(UpperCAmelCase__ )
A_ = []
for i in range(0, len(UpperCAmelCase__ ), 1_00 ):
A_ = bert_tokenizer(lines[i : i + 1_00], add_special_tokens=UpperCAmelCase__, truncation=UpperCAmelCase__, max_length=5_12 )
bert_res.extend(res["""input_ids"""] )
assert len(UpperCAmelCase__ ) == len(UpperCAmelCase__ )
A_ = []
for input_ids, chinese_word in zip(UpperCAmelCase__, UpperCAmelCase__ ):
A_ = []
for id in input_ids:
A_ = bert_tokenizer._convert_id_to_token(UpperCAmelCase__ )
input_tokens.append(UpperCAmelCase__ )
A_ = add_sub_symbol(UpperCAmelCase__, UpperCAmelCase__ )
A_ = []
        # We only save the positions of Chinese subwords that start with ##, which means they are part of a whole word.
for i, token in enumerate(UpperCAmelCase__ ):
if token[:2] == "##":
A_ = token[2:]
# save chinese tokens' pos
if len(UpperCAmelCase__ ) == 1 and _is_chinese_char(ord(UpperCAmelCase__ ) ):
ref_id.append(UpperCAmelCase__ )
ref_ids.append(UpperCAmelCase__ )
assert len(UpperCAmelCase__ ) == len(UpperCAmelCase__ )
return ref_ids
def UpperCAmelCase__ ( UpperCAmelCase__ ) -> Optional[Any]:
    # For Chinese (Ro)BERT(a), the best results come from RoBERTa-wwm-ext (https://github.com/ymcui/Chinese-BERT-wwm).
    # If we want to fine-tune these models, we have to use the same tokenizer: LTP (https://github.com/HIT-SCIR/ltp)
with open(args.file_name, """r""", encoding="""utf-8""" ) as f:
A_ = f.readlines()
A_ = [line.strip() for line in data if len(UpperCAmelCase__ ) > 0 and not line.isspace()] # avoid delimiter like '\u2029'
A_ = LTP(args.ltp ) # faster in GPU device
A_ = BertTokenizer.from_pretrained(args.bert )
A_ = prepare_ref(UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__ )
with open(args.save_path, """w""", encoding="""utf-8""" ) as f:
A_ = [json.dumps(UpperCAmelCase__ ) + """\n""" for ref in ref_ids]
f.writelines(UpperCAmelCase__ )
if __name__ == "__main__":
__lowerCamelCase = argparse.ArgumentParser(description='''prepare_chinese_ref''')
parser.add_argument(
'''--file_name''',
required=False,
type=str,
default='''./resources/chinese-demo.txt''',
help='''file need process, same as training data in lm''',
)
parser.add_argument(
'''--ltp''',
required=False,
type=str,
default='''./resources/ltp''',
help='''resources for LTP tokenizer, usually a path''',
)
parser.add_argument(
'''--bert''',
required=False,
type=str,
default='''./resources/robert''',
help='''resources for Bert tokenizer''',
)
parser.add_argument(
'''--save_path''',
required=False,
type=str,
default='''./resources/ref.txt''',
help='''path to save res''',
)
__lowerCamelCase = parser.parse_args()
main(args)
| 667 | 0 |
'''simple docstring'''
from transformers import DistilBertTokenizer, DistilBertTokenizerFast
from transformers.testing_utils import require_tokenizers, slow
from ..bert.test_tokenization_bert import BertTokenizationTest
@require_tokenizers
class A__ ( BertTokenizationTest ):
lowercase = DistilBertTokenizer
lowercase = DistilBertTokenizerFast
lowercase = True
@slow
def snake_case_ ( self ) -> Tuple:
'''simple docstring'''
        tokenizer = DistilBertTokenizer.from_pretrained("""distilbert-base-uncased""" )
        text = tokenizer.encode("""sequence builders""" , add_special_tokens=False )
        text_a = tokenizer.encode("""multi-sequence build""" , add_special_tokens=False )
        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text )
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text , text_a )
        assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id]
        assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [
            tokenizer.sep_token_id
        ]
| 711 |
'''simple docstring'''
from __future__ import annotations
import math
def is_prime(number ) -> bool:
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False
    # All prime numbers are of the form 6k +/- 1
    for i in range(5, int(math.sqrt(number ) + 1 ), 6 ):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


odd_composites = [num for num in range(3, 10_0001, 2) if not is_prime(num)]


def compute_nums(n ) -> list[int]:
    if not isinstance(n, int ):
        raise ValueError("""n must be an integer""" )
    if n <= 0:
        raise ValueError("""n must be >= 0""" )
    list_nums = []
    for num in range(len(odd_composites ) ):
        i = 0
        while 2 * i * i <= odd_composites[num]:
            rem = odd_composites[num] - 2 * i * i
            if is_prime(rem ):
                break
            i += 1
        else:
            list_nums.append(odd_composites[num] )
        if len(list_nums ) == n:
            return list_nums
    return []


def solution() -> int:
    return compute_nums(1 )[0]
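
# Illustration: 33 satisfies the conjecture as 31 + 2 * 1**2 and is skipped,
# whereas 5777 admits no decomposition prime + 2*i*i, exhausts the while-loop,
# and is collected -- compute_nums(1)[0] is then 5777, the Project Euler 46
# answer (hedged from memory).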
if __name__ == "__main__":
print(f"""{solution() = }""")
| 667 | 0 |